webscout 8.3__py3-none-any.whl → 8.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (120)
  1. webscout/AIauto.py +4 -4
  2. webscout/AIbase.py +61 -1
  3. webscout/AIutel.py +46 -53
  4. webscout/Bing_search.py +418 -0
  5. webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
  6. webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
  7. webscout/Extra/YTToolkit/ytapi/video.py +10 -10
  8. webscout/Extra/autocoder/autocoder_utiles.py +1 -1
  9. webscout/Extra/gguf.py +706 -177
  10. webscout/Litlogger/formats.py +9 -0
  11. webscout/Litlogger/handlers.py +18 -0
  12. webscout/Litlogger/logger.py +43 -1
  13. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  14. webscout/Provider/AISEARCH/scira_search.py +3 -2
  15. webscout/Provider/GeminiProxy.py +140 -0
  16. webscout/Provider/LambdaChat.py +7 -1
  17. webscout/Provider/MCPCore.py +78 -75
  18. webscout/Provider/OPENAI/BLACKBOXAI.py +1046 -1017
  19. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  20. webscout/Provider/OPENAI/Qwen3.py +303 -303
  21. webscout/Provider/OPENAI/README.md +5 -0
  22. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  23. webscout/Provider/OPENAI/TogetherAI.py +355 -0
  24. webscout/Provider/OPENAI/__init__.py +16 -1
  25. webscout/Provider/OPENAI/autoproxy.py +332 -0
  26. webscout/Provider/OPENAI/base.py +101 -14
  27. webscout/Provider/OPENAI/chatgpt.py +15 -2
  28. webscout/Provider/OPENAI/chatgptclone.py +14 -3
  29. webscout/Provider/OPENAI/deepinfra.py +339 -328
  30. webscout/Provider/OPENAI/e2b.py +295 -74
  31. webscout/Provider/OPENAI/mcpcore.py +109 -70
  32. webscout/Provider/OPENAI/opkfc.py +18 -6
  33. webscout/Provider/OPENAI/scirachat.py +59 -50
  34. webscout/Provider/OPENAI/toolbaz.py +2 -10
  35. webscout/Provider/OPENAI/writecream.py +166 -166
  36. webscout/Provider/OPENAI/x0gpt.py +367 -367
  37. webscout/Provider/OPENAI/xenai.py +514 -0
  38. webscout/Provider/OPENAI/yep.py +389 -383
  39. webscout/Provider/STT/__init__.py +3 -0
  40. webscout/Provider/STT/base.py +281 -0
  41. webscout/Provider/STT/elevenlabs.py +265 -0
  42. webscout/Provider/TTI/__init__.py +4 -1
  43. webscout/Provider/TTI/aiarta.py +399 -365
  44. webscout/Provider/TTI/base.py +74 -2
  45. webscout/Provider/TTI/bing.py +231 -0
  46. webscout/Provider/TTI/fastflux.py +63 -30
  47. webscout/Provider/TTI/gpt1image.py +149 -0
  48. webscout/Provider/TTI/imagen.py +196 -0
  49. webscout/Provider/TTI/magicstudio.py +60 -29
  50. webscout/Provider/TTI/piclumen.py +43 -32
  51. webscout/Provider/TTI/pixelmuse.py +232 -225
  52. webscout/Provider/TTI/pollinations.py +43 -32
  53. webscout/Provider/TTI/together.py +287 -0
  54. webscout/Provider/TTI/utils.py +2 -1
  55. webscout/Provider/TTS/README.md +1 -0
  56. webscout/Provider/TTS/__init__.py +2 -1
  57. webscout/Provider/TTS/freetts.py +140 -0
  58. webscout/Provider/TTS/speechma.py +45 -39
  59. webscout/Provider/TogetherAI.py +366 -0
  60. webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
  61. webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
  62. webscout/Provider/XenAI.py +324 -0
  63. webscout/Provider/__init__.py +8 -0
  64. webscout/Provider/deepseek_assistant.py +378 -0
  65. webscout/Provider/scira_chat.py +3 -2
  66. webscout/Provider/toolbaz.py +0 -1
  67. webscout/auth/__init__.py +44 -0
  68. webscout/auth/api_key_manager.py +189 -0
  69. webscout/auth/auth_system.py +100 -0
  70. webscout/auth/config.py +76 -0
  71. webscout/auth/database.py +400 -0
  72. webscout/auth/exceptions.py +67 -0
  73. webscout/auth/middleware.py +248 -0
  74. webscout/auth/models.py +130 -0
  75. webscout/auth/providers.py +257 -0
  76. webscout/auth/rate_limiter.py +254 -0
  77. webscout/auth/request_models.py +127 -0
  78. webscout/auth/request_processing.py +226 -0
  79. webscout/auth/routes.py +526 -0
  80. webscout/auth/schemas.py +103 -0
  81. webscout/auth/server.py +312 -0
  82. webscout/auth/static/favicon.svg +11 -0
  83. webscout/auth/swagger_ui.py +203 -0
  84. webscout/auth/templates/components/authentication.html +237 -0
  85. webscout/auth/templates/components/base.html +103 -0
  86. webscout/auth/templates/components/endpoints.html +750 -0
  87. webscout/auth/templates/components/examples.html +491 -0
  88. webscout/auth/templates/components/footer.html +75 -0
  89. webscout/auth/templates/components/header.html +27 -0
  90. webscout/auth/templates/components/models.html +286 -0
  91. webscout/auth/templates/components/navigation.html +70 -0
  92. webscout/auth/templates/static/api.js +455 -0
  93. webscout/auth/templates/static/icons.js +168 -0
  94. webscout/auth/templates/static/main.js +784 -0
  95. webscout/auth/templates/static/particles.js +201 -0
  96. webscout/auth/templates/static/styles.css +3353 -0
  97. webscout/auth/templates/static/ui.js +374 -0
  98. webscout/auth/templates/swagger_ui.html +170 -0
  99. webscout/client.py +49 -3
  100. webscout/litagent/Readme.md +12 -3
  101. webscout/litagent/agent.py +99 -62
  102. webscout/scout/core/scout.py +104 -26
  103. webscout/scout/element.py +139 -18
  104. webscout/swiftcli/core/cli.py +14 -3
  105. webscout/swiftcli/decorators/output.py +59 -9
  106. webscout/update_checker.py +31 -49
  107. webscout/version.py +1 -1
  108. webscout/webscout_search.py +4 -12
  109. webscout/webscout_search_async.py +3 -10
  110. webscout/yep_search.py +2 -11
  111. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/METADATA +41 -11
  112. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/RECORD +116 -68
  113. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/entry_points.txt +1 -1
  114. webscout/Provider/HF_space/__init__.py +0 -0
  115. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  116. webscout/Provider/OPENAI/api.py +0 -1035
  117. webscout/Provider/TTI/artbit.py +0 -0
  118. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/WHEEL +0 -0
  119. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/licenses/LICENSE.md +0 -0
  120. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,328 @@
1
+ import json
2
+ import time
3
+ import uuid
4
+ import base64
5
+ from typing import List, Dict, Optional, Union, Generator, Any
6
+
7
+ import requests
8
+ from uuid import uuid4
9
+
10
+ # Import base classes and utility structures
11
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
12
+ from .utils import (
13
+ ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
14
+ ChatCompletionMessage, CompletionUsage, count_tokens
15
+ )
16
+
17
+ from webscout.litagent import LitAgent
18
+ from webscout import exceptions
19
+
20
+
21
class Completions(BaseCompletions):
    """OpenAI-style ``chat.completions`` implementation backed by GeminiProxy.

    GeminiProxy exposes a Gemini ``generateContent``-shaped endpoint and has
    no native streaming, so ``stream=True`` is simulated by fetching the full
    response and yielding it in chunks client-side.
    """

    def __init__(self, client: 'GeminiProxy'):
        # The owning client supplies the HTTP session, headers, base URL and
        # default timeout.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a chat completion with GeminiProxy API.

        Args:
            model: The model to use (from AVAILABLE_MODELS)
            messages: List of message dictionaries with 'role' and 'content'
            max_tokens: Maximum number of tokens to generate (accepted for
                interface compatibility; not forwarded to the backend)
            stream: Whether to stream the response (simulated client-side;
                GeminiProxy does not support streaming)
            temperature: Sampling temperature (0-1; not forwarded)
            top_p: Nucleus sampling parameter (0-1; not forwarded)
            timeout: Request timeout in seconds
            proxies: Proxy configuration
            **kwargs: Additional parameters; ``img_url`` attaches an image
                to the request as inline data

        Returns:
            If stream=False, returns a ChatCompletion object
            If stream=True, returns a Generator yielding ChatCompletionChunk objects
        """
        # Generate request ID and timestamp for the OpenAI-style envelope.
        request_id = str(uuid.uuid4())
        created_time = int(time.time())

        # Optional image attachment (fetched and base64-encoded below).
        img_url = kwargs.get('img_url')

        # Flatten the OpenAI-style message list into one Gemini prompt.
        conversation_prompt = self._format_messages(messages)

        # Build the Gemini "parts" payload; an image part, if any, precedes
        # the text part.
        parts = []
        if img_url:
            parts.append({"inline_data": self._get_image(img_url, proxies, timeout)})
        parts.append({"text": conversation_prompt})

        payload = {
            "model": model,
            "contents": [{"parts": parts}]
        }

        # GeminiProxy doesn't support streaming, so simulate it from a full
        # non-streaming response.
        if stream:
            return self._create_streaming_fallback(
                request_id=request_id,
                created_time=created_time,
                model=model,
                payload=payload,
                timeout=timeout,
                proxies=proxies
            )

        # Non-streaming implementation
        return self._create_non_streaming(
            request_id=request_id,
            created_time=created_time,
            model=model,
            payload=payload,
            timeout=timeout,
            proxies=proxies
        )

    def _format_messages(self, messages: List[Dict[str, str]]) -> str:
        """Convert OpenAI messages format to a single conversation prompt.

        Roles other than system/user/assistant are silently skipped, matching
        the behavior callers already rely on.
        """
        formatted_parts = []

        for message in messages:
            role = message.get("role", "")
            content = message.get("content", "")

            if role == "system":
                formatted_parts.append(f"System: {content}")
            elif role == "user":
                formatted_parts.append(f"User: {content}")
            elif role == "assistant":
                formatted_parts.append(f"Assistant: {content}")

        return "\n".join(formatted_parts)

    def _get_image(self, img_url: str, proxies: Optional[dict] = None, timeout: Optional[int] = None) -> Dict[str, str]:
        """Fetch an image from *img_url* and return it as Gemini inline data.

        Returns:
            Dict with ``mime_type`` and base64-encoded ``data`` keys.

        Raises:
            exceptions.FailedToGenerateResponseError: if the download fails.
        """
        try:
            # Use a throwaway session so caller-supplied proxies don't leak
            # into the client's long-lived session; the context manager
            # guarantees the connection pool is released (the original
            # leaked the Session).
            with requests.Session() as session:
                if proxies:
                    session.proxies.update(proxies)

                response = session.get(
                    img_url,
                    stream=True,
                    timeout=timeout or self._client.timeout
                )
                response.raise_for_status()

                mime_type = response.headers.get("content-type", "application/octet-stream")
                data = base64.b64encode(response.content).decode("utf-8")
                return {"mime_type": mime_type, "data": data}
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Error fetching image: {e}") from e

    def _create_non_streaming(
        self,
        *,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any],
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None
    ) -> ChatCompletion:
        """Implementation for non-streaming chat completions."""
        # Temporarily swap in the caller's proxies; restored in ``finally``.
        original_proxies = self._client.session.proxies.copy()
        if proxies is not None:
            self._client.session.proxies.update(proxies)

        try:
            response = self._client.session.post(
                self._client.base_url,
                json=payload,
                headers=self._client.headers,
                timeout=timeout if timeout is not None else self._client.timeout
            )
            response.raise_for_status()
            data = response.json()

            # Extract content from GeminiProxy response
            content = self._extract_content(data)

            # Create the completion message
            message = ChatCompletionMessage(
                role="assistant",
                content=content
            )

            # Create the choice
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            # Estimate token usage. Bug fix: request parts carry their text
            # under the "text" key (see ``create``), not "content" — the old
            # lookup always counted empty strings for the prompt.
            prompt_tokens = count_tokens(
                [part.get("text", "") for part in payload.get("contents", [{}])[0].get("parts", [{}])]
            )
            completion_tokens = count_tokens(content)
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )

            # Create the completion object
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

            return completion

        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"GeminiProxy request failed: {e}") from e
        finally:
            if proxies is not None:
                self._client.session.proxies = original_proxies

    def _create_streaming_fallback(
        self,
        *,
        request_id: str,
        created_time: int,
        model: str,
        payload: Dict[str, Any],
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Fallback streaming implementation that simulates streaming from a
        non-streaming response.

        Yields ~10 content chunks followed by a final chunk whose
        ``finish_reason`` is ``"stop"``.
        """
        # Get the full response first
        completion = self._create_non_streaming(
            request_id=request_id,
            created_time=created_time,
            model=model,
            payload=payload,
            timeout=timeout,
            proxies=proxies
        )

        # Simulate streaming by yielding chunks
        content = completion.choices[0].message.content
        if content:
            # Split content into chunks (simulate streaming)
            chunk_size = max(1, len(content) // 10)  # Split into ~10 chunks
            for i in range(0, len(content), chunk_size):
                chunk_content = content[i:i + chunk_size]

                delta = ChoiceDelta(content=chunk_content)
                choice = Choice(index=0, delta=delta, finish_reason=None)

                chunk = ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model
                )

                yield chunk

        # Final chunk with finish_reason
        delta = ChoiceDelta(content=None)
        choice = Choice(index=0, delta=delta, finish_reason="stop")
        chunk = ChatCompletionChunk(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model
        )

        yield chunk

    def _extract_content(self, response: dict) -> str:
        """Extract content from GeminiProxy response.

        Falls back to the stringified raw response when the expected
        ``candidates[0].content.parts[0].text`` path is absent.
        """
        try:
            return response['candidates'][0]['content']['parts'][0]['text']
        except (KeyError, IndexError, TypeError):
            return str(response)
262
+
263
+
264
class Chat(BaseChat):
    """Namespace object mirroring the OpenAI SDK layout.

    Exposes ``completions`` so callers can write
    ``client.chat.completions.create(...)``.
    """

    def __init__(self, client: 'GeminiProxy'):
        # The completions endpoint does all the work; this class only
        # provides the attribute path.
        self.completions = Completions(client)
267
+
268
+
269
class GeminiProxy(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for GeminiProxy API.

    Usage:
        client = GeminiProxy()
        response = client.chat.completions.create(
            model="gemini-2.0-flash-lite",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """

    # Models accepted by the proxy endpoint.
    AVAILABLE_MODELS = [
        "gemini-2.0-flash-lite",
        "gemini-2.0-flash",
        "gemini-2.5-pro-preview-06-05",
        "gemini-2.5-pro-preview-05-06",
        "gemini-2.5-flash-preview-04-17",
        "gemini-2.5-flash-preview-05-20",
    ]

    def __init__(
        self,
        api_key: Optional[str] = None,  # Not used but included for compatibility
        browser: str = "chrome",
        **kwargs: Any
    ):
        """
        Initialize the GeminiProxy client.

        Args:
            api_key: Ignored; accepted only so the constructor matches the
                OpenAI client signature.
            browser: Browser profile used to generate fingerprint headers.
            **kwargs: Forwarded to the base provider.
        """
        super().__init__(api_key=api_key, **kwargs)

        self.timeout = 30
        self.base_url = "https://us-central1-infinite-chain-295909.cloudfunctions.net/gemini-proxy-staging-v1"

        # Browser fingerprint headers make requests look like a real browser.
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)
        self.headers = self.fingerprint.copy()

        # One long-lived session carries the fingerprint headers; proxies
        # start empty and may be swapped per-request by Completions.
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies = {}

        # OpenAI-style entry point: client.chat.completions.create(...)
        self.chat = Chat(self)

    @property
    def models(self):
        # Minimal stand-in for the OpenAI SDK's ``client.models.list()``.
        outer = self

        class _ModelList:
            def list(self):
                return type(outer).AVAILABLE_MODELS

        return _ModelList()