webscout 8.3.1__py3-none-any.whl → 8.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (77)
  1. webscout/AIutel.py +46 -53
  2. webscout/Bing_search.py +418 -0
  3. webscout/Extra/gguf.py +706 -177
  4. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  5. webscout/Provider/GeminiProxy.py +140 -0
  6. webscout/Provider/MCPCore.py +78 -75
  7. webscout/Provider/OPENAI/BLACKBOXAI.py +1 -4
  8. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  9. webscout/Provider/OPENAI/README.md +2 -0
  10. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  11. webscout/Provider/OPENAI/__init__.py +15 -1
  12. webscout/Provider/OPENAI/autoproxy.py +332 -39
  13. webscout/Provider/OPENAI/base.py +15 -5
  14. webscout/Provider/OPENAI/e2b.py +0 -1
  15. webscout/Provider/OPENAI/mcpcore.py +109 -70
  16. webscout/Provider/OPENAI/scirachat.py +59 -51
  17. webscout/Provider/OPENAI/toolbaz.py +2 -9
  18. webscout/Provider/OPENAI/xenai.py +514 -0
  19. webscout/Provider/OPENAI/yep.py +8 -2
  20. webscout/Provider/TTI/__init__.py +1 -0
  21. webscout/Provider/TTI/bing.py +231 -0
  22. webscout/Provider/TTS/speechma.py +45 -39
  23. webscout/Provider/TogetherAI.py +366 -0
  24. webscout/Provider/XenAI.py +324 -0
  25. webscout/Provider/__init__.py +8 -3
  26. webscout/Provider/deepseek_assistant.py +378 -0
  27. webscout/auth/__init__.py +44 -0
  28. webscout/auth/api_key_manager.py +189 -0
  29. webscout/auth/auth_system.py +100 -0
  30. webscout/auth/config.py +76 -0
  31. webscout/auth/database.py +400 -0
  32. webscout/auth/exceptions.py +67 -0
  33. webscout/auth/middleware.py +248 -0
  34. webscout/auth/models.py +130 -0
  35. webscout/auth/providers.py +257 -0
  36. webscout/auth/rate_limiter.py +254 -0
  37. webscout/auth/request_models.py +127 -0
  38. webscout/auth/request_processing.py +226 -0
  39. webscout/auth/routes.py +526 -0
  40. webscout/auth/schemas.py +103 -0
  41. webscout/auth/server.py +312 -0
  42. webscout/auth/static/favicon.svg +11 -0
  43. webscout/auth/swagger_ui.py +203 -0
  44. webscout/auth/templates/components/authentication.html +237 -0
  45. webscout/auth/templates/components/base.html +103 -0
  46. webscout/auth/templates/components/endpoints.html +750 -0
  47. webscout/auth/templates/components/examples.html +491 -0
  48. webscout/auth/templates/components/footer.html +75 -0
  49. webscout/auth/templates/components/header.html +27 -0
  50. webscout/auth/templates/components/models.html +286 -0
  51. webscout/auth/templates/components/navigation.html +70 -0
  52. webscout/auth/templates/static/api.js +455 -0
  53. webscout/auth/templates/static/icons.js +168 -0
  54. webscout/auth/templates/static/main.js +784 -0
  55. webscout/auth/templates/static/particles.js +201 -0
  56. webscout/auth/templates/static/styles.css +3353 -0
  57. webscout/auth/templates/static/ui.js +374 -0
  58. webscout/auth/templates/swagger_ui.html +170 -0
  59. webscout/client.py +49 -3
  60. webscout/scout/core/scout.py +104 -26
  61. webscout/scout/element.py +139 -18
  62. webscout/swiftcli/core/cli.py +14 -3
  63. webscout/swiftcli/decorators/output.py +59 -9
  64. webscout/update_checker.py +31 -49
  65. webscout/version.py +1 -1
  66. webscout/webscout_search.py +4 -12
  67. webscout/webscout_search_async.py +3 -10
  68. webscout/yep_search.py +2 -11
  69. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/METADATA +41 -11
  70. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/RECORD +74 -36
  71. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/entry_points.txt +1 -1
  72. webscout/Provider/HF_space/__init__.py +0 -0
  73. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  74. webscout/Provider/OPENAI/api.py +0 -1320
  75. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/WHEEL +0 -0
  76. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/licenses/LICENSE.md +0 -0
  77. {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/GeminiProxy.py
@@ -0,0 +1,328 @@
+ import json
+ import time
+ import uuid
+ import base64
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ import requests
+ from uuid import uuid4
+
+ # Import base classes and utility structures
+ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from .utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, count_tokens
+ )
+
+ from webscout.litagent import LitAgent
+ from webscout import exceptions
+
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'GeminiProxy'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[dict] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Create a chat completion with GeminiProxy API.
+
+         Args:
+             model: The model to use (from AVAILABLE_MODELS)
+             messages: List of message dictionaries with 'role' and 'content'
+             max_tokens: Maximum number of tokens to generate
+             stream: Whether to stream the response (not supported by GeminiProxy)
+             temperature: Sampling temperature (0-1)
+             top_p: Nucleus sampling parameter (0-1)
+             timeout: Request timeout in seconds
+             proxies: Proxy configuration
+             **kwargs: Additional parameters to pass to the API
+
+         Returns:
+             If stream=False, returns a ChatCompletion object.
+             If stream=True, returns a Generator yielding ChatCompletionChunk objects.
+         """
+         # Generate request ID and timestamp
+         request_id = str(uuid.uuid4())
+         created_time = int(time.time())
+
+         # Extract image URL from kwargs if present
+         img_url = kwargs.get('img_url')
+
+         # Convert messages to GeminiProxy format
+         conversation_prompt = self._format_messages(messages)
+
+         # Prepare parts for the request
+         parts = []
+         if img_url:
+             parts.append({"inline_data": self._get_image(img_url, proxies, timeout)})
+         parts.append({"text": conversation_prompt})
+
+         # Prepare the payload
+         payload = {
+             "model": model,
+             "contents": [{"parts": parts}]
+         }
+
+         # GeminiProxy doesn't support streaming, so we always return non-streaming
+         if stream:
+             return self._create_streaming_fallback(
+                 request_id=request_id,
+                 created_time=created_time,
+                 model=model,
+                 payload=payload,
+                 timeout=timeout,
+                 proxies=proxies
+             )
+
+         # Non-streaming implementation
+         return self._create_non_streaming(
+             request_id=request_id,
+             created_time=created_time,
+             model=model,
+             payload=payload,
+             timeout=timeout,
+             proxies=proxies
+         )
+
+     def _format_messages(self, messages: List[Dict[str, str]]) -> str:
+         """Convert OpenAI messages format to a single conversation prompt."""
+         formatted_parts = []
+
+         for message in messages:
+             role = message.get("role", "")
+             content = message.get("content", "")
+
+             if role == "system":
+                 formatted_parts.append(f"System: {content}")
+             elif role == "user":
+                 formatted_parts.append(f"User: {content}")
+             elif role == "assistant":
+                 formatted_parts.append(f"Assistant: {content}")
+
+         return "\n".join(formatted_parts)
+
+     def _get_image(self, img_url: str, proxies: Optional[dict] = None, timeout: Optional[int] = None) -> Dict[str, str]:
+         """Fetch and encode image from URL."""
+         try:
+             session = requests.Session()
+             if proxies:
+                 session.proxies.update(proxies)
+
+             response = session.get(
+                 img_url,
+                 stream=True,
+                 timeout=timeout or self._client.timeout
+             )
+             response.raise_for_status()
+
+             mime_type = response.headers.get("content-type", "application/octet-stream")
+             data = base64.b64encode(response.content).decode("utf-8")
+             return {"mime_type": mime_type, "data": data}
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"Error fetching image: {e}")
+
+     def _create_non_streaming(
+         self,
+         *,
+         request_id: str,
+         created_time: int,
+         model: str,
+         payload: Dict[str, Any],
+         timeout: Optional[int] = None,
+         proxies: Optional[dict] = None
+     ) -> ChatCompletion:
+         """Implementation for non-streaming chat completions."""
+         original_proxies = self._client.session.proxies.copy()
+         if proxies is not None:
+             self._client.session.proxies.update(proxies)
+
+         try:
+             response = self._client.session.post(
+                 self._client.base_url,
+                 json=payload,
+                 headers=self._client.headers,
+                 timeout=timeout if timeout is not None else self._client.timeout
+             )
+             response.raise_for_status()
+             data = response.json()
+
+             # Extract content from GeminiProxy response
+             content = self._extract_content(data)
+
+             # Create the completion message
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=content
+             )
+
+             # Create the choice
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason="stop"
+             )
+
+             # Estimate token usage
+             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("contents", [{}])[0].get("parts", [{}])])
+             completion_tokens = count_tokens(content)
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=prompt_tokens + completion_tokens
+             )
+
+             # Create the completion object
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+
+             return completion
+
+         except Exception as e:
+             raise exceptions.FailedToGenerateResponseError(f"GeminiProxy request failed: {e}")
+         finally:
+             if proxies is not None:
+                 self._client.session.proxies = original_proxies
+
+     def _create_streaming_fallback(
+         self,
+         *,
+         request_id: str,
+         created_time: int,
+         model: str,
+         payload: Dict[str, Any],
+         timeout: Optional[int] = None,
+         proxies: Optional[dict] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         """Fallback streaming implementation that simulates streaming from a non-streaming response."""
+         # Get the full response first
+         completion = self._create_non_streaming(
+             request_id=request_id,
+             created_time=created_time,
+             model=model,
+             payload=payload,
+             timeout=timeout,
+             proxies=proxies
+         )
+
+         # Simulate streaming by yielding chunks
+         content = completion.choices[0].message.content
+         if content:
+             # Split content into chunks (simulate streaming)
+             chunk_size = max(1, len(content) // 10)  # Split into ~10 chunks
+             for i in range(0, len(content), chunk_size):
+                 chunk_content = content[i:i + chunk_size]
+
+                 delta = ChoiceDelta(content=chunk_content)
+                 choice = Choice(index=0, delta=delta, finish_reason=None)
+
+                 chunk = ChatCompletionChunk(
+                     id=request_id,
+                     choices=[choice],
+                     created=created_time,
+                     model=model
+                 )
+
+                 yield chunk
+
+         # Final chunk with finish_reason
+         delta = ChoiceDelta(content=None)
+         choice = Choice(index=0, delta=delta, finish_reason="stop")
+         chunk = ChatCompletionChunk(
+             id=request_id,
+             choices=[choice],
+             created=created_time,
+             model=model
+         )
+
+         yield chunk
+
+     def _extract_content(self, response: dict) -> str:
+         """Extract content from GeminiProxy response."""
+         try:
+             return response['candidates'][0]['content']['parts'][0]['text']
+         except (KeyError, IndexError, TypeError):
+             return str(response)
+
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'GeminiProxy'):
+         self.completions = Completions(client)
+
+
+ class GeminiProxy(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for GeminiProxy API.
+
+     Usage:
+         client = GeminiProxy()
+         response = client.chat.completions.create(
+             model="gemini-2.0-flash-lite",
+             messages=[{"role": "user", "content": "Hello!"}]
+         )
+         print(response.choices[0].message.content)
+     """
+
+     AVAILABLE_MODELS = [
+         "gemini-2.0-flash-lite",
+         "gemini-2.0-flash",
+         "gemini-2.5-pro-preview-06-05",
+         "gemini-2.5-pro-preview-05-06",
+         "gemini-2.5-flash-preview-04-17",
+         "gemini-2.5-flash-preview-05-20",
+     ]
+
+     def __init__(
+         self,
+         api_key: Optional[str] = None,  # Not used but included for compatibility
+         browser: str = "chrome",
+         **kwargs: Any
+     ):
+         """
+         Initialize the GeminiProxy client.
+
+         Args:
+             api_key: Not used but included for compatibility with OpenAI interface
+             browser: Browser type for fingerprinting
+             **kwargs: Additional parameters
+         """
+         super().__init__(api_key=api_key, **kwargs)
+
+         self.timeout = 30
+         self.base_url = "https://us-central1-infinite-chain-295909.cloudfunctions.net/gemini-proxy-staging-v1"
+
+         # Initialize LitAgent for fingerprinting
+         self.agent = LitAgent()
+         self.fingerprint = self.agent.generate_fingerprint(browser)
+
+         # Initialize session
+         self.session = requests.Session()
+         self.headers = self.fingerprint.copy()
+         self.session.headers.update(self.headers)
+         self.session.proxies = {}
+
+         # Initialize chat interface
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return type(self).AVAILABLE_MODELS
+         return _ModelList()
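
A quick usage sketch of the provider added above (not part of the diff): model names come from `AVAILABLE_MODELS`, and the `img_url` keyword and chunked output follow `create()` and `_create_streaming_fallback()` as shown. The image URL is a placeholder; treat the sketch as illustrative rather than guaranteed behavior of the upstream proxy.

```python
# Illustrative only -- based on the GeminiProxy.py diff above.
from webscout.Provider.OPENAI.GeminiProxy import GeminiProxy

client = GeminiProxy()

# stream=True is simulated: the full reply is fetched once, then re-yielded
# as roughly ten ChatCompletionChunk objects plus a final "stop" chunk.
stream = client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[{"role": "user", "content": "Describe this image."}],
    stream=True,
    img_url="https://example.com/cat.png",  # optional; fetched, base64-encoded, sent inline
)
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```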
webscout/Provider/OPENAI/README.md
@@ -68,6 +68,8 @@ Currently, the following providers are implemented with OpenAI-compatible interfaces
  - TogetherAI
  - PiAI
  - FalconH1
+ - XenAI
+ - GeminiProxy
  ---
webscout/Provider/OPENAI/README_AUTOPROXY.md
@@ -0,0 +1,238 @@
+ # WebScout Auto-Proxy System
+
+ The WebScout Auto-Proxy system provides automatic proxy injection for all OpenAI-compatible providers. This system fetches proxies from a remote source and automatically configures them for HTTP sessions.
+
+ ## Features
+
+ - **Automatic Proxy Injection**: All OpenAI-compatible providers automatically get proxy support
+ - **Multiple HTTP Client Support**: Works with `requests`, `httpx`, and `curl_cffi`
+ - **Proxy Pool Management**: Automatically fetches and caches proxies from a remote source
+ - **Working Proxy Detection**: Tests proxies to find working ones
+ - **Easy Disable Option**: Can be disabled per provider instance or globally
+
+ ## How It Works
+
+ The system uses a metaclass (`ProxyAutoMeta`) that automatically:
+
+ 1. Fetches proxies from `http://207.180.209.185:5000/ips.txt`
+ 2. Caches proxies for 5 minutes to avoid excessive requests
+ 3. Randomly selects a proxy for each provider instance
+ 4. Patches existing HTTP session objects with proxy configuration
+ 5. Provides helper methods for creating proxied sessions
+
+ ## Usage
+
+ ### Automatic Usage (Default)
+
+ All OpenAI-compatible providers automatically get proxy support:
+
+ ```python
+ from webscout.Provider.OPENAI.yep import YEPCHAT
+
+ # Proxy is automatically configured
+ client = YEPCHAT()
+
+ # All requests will use the configured proxy
+ response = client.chat.completions.create(
+     model="DeepSeek-R1-Distill-Qwen-32B",
+     messages=[{"role": "user", "content": "Hello!"}]
+ )
+ ```
+
+ ### Disabling Auto-Proxy
+
+ You can disable automatic proxy injection:
+
+ ```python
+ # Disable for a specific instance
+ client = YEPCHAT(disable_auto_proxy=True)
+
+ # Or set a class attribute to disable for all instances
+ class MyProvider(OpenAICompatibleProvider):
+     DISABLE_AUTO_PROXY = True
+ ```
+
+ ### Manual Proxy Configuration
+
+ You can also provide your own proxies:
+
+ ```python
+ custom_proxies = {
+     'http': 'http://user:pass@proxy.example.com:8080',
+     'https': 'http://user:pass@proxy.example.com:8080'
+ }
+
+ client = YEPCHAT(proxies=custom_proxies)
+ ```
+
+ ### Using Helper Methods
+
+ Each provider instance gets helper methods for creating proxied sessions:
+
+ ```python
+ client = YEPCHAT()
+
+ # Get a requests.Session with proxies configured
+ session = client.get_proxied_session()
+
+ # Get a curl_cffi Session with proxies configured
+ curl_session = client.get_proxied_curl_session(impersonate="chrome120")
+
+ # Get an httpx.Client with proxies configured (if httpx is installed)
+ httpx_client = client.get_proxied_httpx_client()
+ ```
+
+ ## Direct API Usage
+
+ You can also use the proxy functions directly:
+
+ ```python
+ from webscout.Provider.OPENAI.autoproxy import (
+     get_auto_proxy,
+     get_proxy_dict,
+     get_working_proxy,
+     test_proxy,
+     get_proxy_stats
+ )
+
+ # Get a random proxy
+ proxy = get_auto_proxy()
+
+ # Get proxy in dictionary format
+ proxy_dict = get_proxy_dict()
+
+ # Find a working proxy (tests multiple proxies)
+ working_proxy = get_working_proxy(max_attempts=5)
+
+ # Test if a proxy is working
+ is_working = test_proxy(proxy)
+
+ # Get proxy cache statistics
+ stats = get_proxy_stats()
+ ```
+
+ ## Proxy Format
+
+ The system expects proxies in the format:
+ ```
+ http://username:password@host:port
+ ```
+
+ Example:
+ ```
+ http://fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==:fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==@190.103.177.163:80
+ ```
+
+ ## Configuration
+
+ ### Cache Duration
+
+ You can adjust the proxy cache duration:
+
+ ```python
+ from webscout.Provider.OPENAI.autoproxy import set_proxy_cache_duration
+
+ # Set cache to 10 minutes
+ set_proxy_cache_duration(600)
+ ```
+
+ ### Force Refresh
+
+ You can force a refresh of the proxy cache:
+
+ ```python
+ from webscout.Provider.OPENAI.autoproxy import refresh_proxy_cache
+
+ # Force a refresh and get the number of proxies loaded
+ count = refresh_proxy_cache()
+ print(f"Loaded {count} proxies")
+ ```
+
+ ## Error Handling
+
+ The system handles errors gracefully:
+
+ - If proxy fetching fails, providers work without proxies
+ - If a proxy test fails, the system tries other proxies
+ - If no working proxy is found, providers fall back to direct connections
+
+ ## Logging
+
+ The system uses Python's logging module. To see proxy-related logs:
+
+ ```python
+ import logging
+ logging.basicConfig(level=logging.INFO)
+
+ # Or specifically for the autoproxy module
+ logger = logging.getLogger('webscout.Provider.OPENAI.autoproxy')
+ logger.setLevel(logging.DEBUG)
+ ```
+
+ ## Testing
+
+ Run the test suite to verify functionality:
+
+ ```bash
+ python webscout/Provider/OPENAI/test_autoproxy.py
+ ```
+
+ ## Implementation Details
+
+ ### ProxyAutoMeta Metaclass
+
+ The `ProxyAutoMeta` metaclass is applied to `OpenAICompatibleProvider` and:
+
+ 1. Intercepts class instantiation
+ 2. Checks for the `disable_auto_proxy` parameter or class attribute
+ 3. Fetches and configures proxies if not disabled
+ 4. Patches existing session objects
+ 5. Adds helper methods to the instance
+
+ ### Session Patching
+
+ The system automatically patches these session types:
+ - `requests.Session` - Updates the `proxies` attribute
+ - `httpx.Client` - Sets the `_proxies` attribute
+ - `curl_cffi.Session` - Updates the `proxies` attribute
+ - `curl_cffi.AsyncSession` - Updates the `proxies` attribute
+
+ ### Proxy Source
+
+ Proxies are fetched from: `http://207.180.209.185:5000/ips.txt`
+
+ The system expects one proxy per line in the format shown above.
+
+ ## Troubleshooting
+
+ ### No Proxies Available
+
+ If you see "No proxies available" messages:
+ 1. Check that the proxy source URL is accessible
+ 2. Verify your internet connection
+ 3. Check that the proxy format is correct
+
+ ### Proxy Test Failures
+
+ If proxy tests fail:
+ 1. Some proxies may be temporarily unavailable (this is normal)
+ 2. The test URL (`https://httpbin.org/ip`) may be blocked
+ 3. There may be network connectivity issues
+
+ ### Provider Not Getting Proxies
+
+ If a provider doesn't get automatic proxies:
+ 1. Ensure it inherits from `OpenAICompatibleProvider`
+ 2. Check whether `disable_auto_proxy` is set
+ 3. Verify the metaclass is properly imported
+
+ ## Contributing
+
+ To add proxy support to a new provider:
+
+ 1. Inherit from `OpenAICompatibleProvider`
+ 2. Accept a `disable_auto_proxy` parameter in `__init__`
+ 3. Use `self.proxies` for HTTP requests
+ 4. Optionally use helper methods like `self.get_proxied_session()`
+
+ The metaclass will handle the rest automatically!
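
To make the "Implementation Details" section above concrete, here is a minimal sketch of the metaclass pattern it describes. This is not the shipped `autoproxy.py` (its 332 added lines are not reproduced in this diff); `fetch_proxies()` is a hypothetical stand-in for the module's cached remote fetch, and the error handling mirrors the README's fall-back-to-direct-connection behavior.

```python
# Illustrative sketch only -- the real ProxyAutoMeta lives in
# webscout/Provider/OPENAI/autoproxy.py and is not shown in this diff.
import random
import requests

PROXY_SOURCE = "http://207.180.209.185:5000/ips.txt"

def fetch_proxies():
    """Hypothetical stand-in for autoproxy's cached fetch of the remote list."""
    resp = requests.get(PROXY_SOURCE, timeout=10)
    resp.raise_for_status()
    return [line.strip() for line in resp.text.splitlines() if line.strip()]

class ProxyAutoMeta(type):
    """Intercepts instantiation and injects a proxy, per the README's five steps."""
    def __call__(cls, *args, **kwargs):
        # Honor the per-instance flag or the class-level DISABLE_AUTO_PROXY attribute.
        disable = kwargs.pop("disable_auto_proxy", getattr(cls, "DISABLE_AUTO_PROXY", False))
        instance = super().__call__(*args, **kwargs)
        if not disable and not getattr(instance, "proxies", None):
            try:
                # Fetch the pool and pick a proxy at random for this instance.
                proxy = random.choice(fetch_proxies())
                instance.proxies = {"http": proxy, "https": proxy}
                # Patch an existing requests.Session, if the provider created one.
                session = getattr(instance, "session", None)
                if isinstance(session, requests.Session):
                    session.proxies.update(instance.proxies)
            except Exception:
                # If fetching fails, fall back to a direct connection.
                instance.proxies = {}
        return instance
```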
webscout/Provider/OPENAI/__init__.py
@@ -40,4 +40,18 @@ from .oivscode import * # Add OnRender provider
  from .Qwen3 import *
  from .FalconH1 import *
  from .PI import * # Add PI.ai provider
- from .TogetherAI import * # Add TogetherAI provider
+ from .TogetherAI import * # Add TogetherAI provider
+ from .xenai import * # Add XenAI provider
+ from .GeminiProxy import * # Add GeminiProxy provider
+
+ # Export auto-proxy functionality
+ from .autoproxy import (
+     get_auto_proxy,
+     get_proxy_dict,
+     get_working_proxy,
+     test_proxy,
+     get_proxy_stats,
+     refresh_proxy_cache,
+     set_proxy_cache_duration,
+     ProxyAutoMeta
+ )
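
With these re-exports, the helpers documented in README_AUTOPROXY.md become importable directly from `webscout.Provider.OPENAI`; a minimal sketch, assuming webscout 8.3.2 is installed:

```python
# Mirrors the README's "Direct API Usage" examples, but via the package re-exports.
from webscout.Provider.OPENAI import get_auto_proxy, get_proxy_dict, test_proxy

proxy = get_auto_proxy()        # a random proxy string from the cached pool
proxy_dict = get_proxy_dict()   # the same, as a requests-style mapping
print(test_proxy(proxy))        # True if the proxy answers the test request
```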