webscout-8.3-py3-none-any.whl → webscout-8.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (120)
  1. webscout/AIauto.py +4 -4
  2. webscout/AIbase.py +61 -1
  3. webscout/AIutel.py +46 -53
  4. webscout/Bing_search.py +418 -0
  5. webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
  6. webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
  7. webscout/Extra/YTToolkit/ytapi/video.py +10 -10
  8. webscout/Extra/autocoder/autocoder_utiles.py +1 -1
  9. webscout/Extra/gguf.py +706 -177
  10. webscout/Litlogger/formats.py +9 -0
  11. webscout/Litlogger/handlers.py +18 -0
  12. webscout/Litlogger/logger.py +43 -1
  13. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  14. webscout/Provider/AISEARCH/scira_search.py +3 -2
  15. webscout/Provider/GeminiProxy.py +140 -0
  16. webscout/Provider/LambdaChat.py +7 -1
  17. webscout/Provider/MCPCore.py +78 -75
  18. webscout/Provider/OPENAI/BLACKBOXAI.py +1046 -1017
  19. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  20. webscout/Provider/OPENAI/Qwen3.py +303 -303
  21. webscout/Provider/OPENAI/README.md +5 -0
  22. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  23. webscout/Provider/OPENAI/TogetherAI.py +355 -0
  24. webscout/Provider/OPENAI/__init__.py +16 -1
  25. webscout/Provider/OPENAI/autoproxy.py +332 -0
  26. webscout/Provider/OPENAI/base.py +101 -14
  27. webscout/Provider/OPENAI/chatgpt.py +15 -2
  28. webscout/Provider/OPENAI/chatgptclone.py +14 -3
  29. webscout/Provider/OPENAI/deepinfra.py +339 -328
  30. webscout/Provider/OPENAI/e2b.py +295 -74
  31. webscout/Provider/OPENAI/mcpcore.py +109 -70
  32. webscout/Provider/OPENAI/opkfc.py +18 -6
  33. webscout/Provider/OPENAI/scirachat.py +59 -50
  34. webscout/Provider/OPENAI/toolbaz.py +2 -10
  35. webscout/Provider/OPENAI/writecream.py +166 -166
  36. webscout/Provider/OPENAI/x0gpt.py +367 -367
  37. webscout/Provider/OPENAI/xenai.py +514 -0
  38. webscout/Provider/OPENAI/yep.py +389 -383
  39. webscout/Provider/STT/__init__.py +3 -0
  40. webscout/Provider/STT/base.py +281 -0
  41. webscout/Provider/STT/elevenlabs.py +265 -0
  42. webscout/Provider/TTI/__init__.py +4 -1
  43. webscout/Provider/TTI/aiarta.py +399 -365
  44. webscout/Provider/TTI/base.py +74 -2
  45. webscout/Provider/TTI/bing.py +231 -0
  46. webscout/Provider/TTI/fastflux.py +63 -30
  47. webscout/Provider/TTI/gpt1image.py +149 -0
  48. webscout/Provider/TTI/imagen.py +196 -0
  49. webscout/Provider/TTI/magicstudio.py +60 -29
  50. webscout/Provider/TTI/piclumen.py +43 -32
  51. webscout/Provider/TTI/pixelmuse.py +232 -225
  52. webscout/Provider/TTI/pollinations.py +43 -32
  53. webscout/Provider/TTI/together.py +287 -0
  54. webscout/Provider/TTI/utils.py +2 -1
  55. webscout/Provider/TTS/README.md +1 -0
  56. webscout/Provider/TTS/__init__.py +2 -1
  57. webscout/Provider/TTS/freetts.py +140 -0
  58. webscout/Provider/TTS/speechma.py +45 -39
  59. webscout/Provider/TogetherAI.py +366 -0
  60. webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
  61. webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
  62. webscout/Provider/XenAI.py +324 -0
  63. webscout/Provider/__init__.py +8 -0
  64. webscout/Provider/deepseek_assistant.py +378 -0
  65. webscout/Provider/scira_chat.py +3 -2
  66. webscout/Provider/toolbaz.py +0 -1
  67. webscout/auth/__init__.py +44 -0
  68. webscout/auth/api_key_manager.py +189 -0
  69. webscout/auth/auth_system.py +100 -0
  70. webscout/auth/config.py +76 -0
  71. webscout/auth/database.py +400 -0
  72. webscout/auth/exceptions.py +67 -0
  73. webscout/auth/middleware.py +248 -0
  74. webscout/auth/models.py +130 -0
  75. webscout/auth/providers.py +257 -0
  76. webscout/auth/rate_limiter.py +254 -0
  77. webscout/auth/request_models.py +127 -0
  78. webscout/auth/request_processing.py +226 -0
  79. webscout/auth/routes.py +526 -0
  80. webscout/auth/schemas.py +103 -0
  81. webscout/auth/server.py +312 -0
  82. webscout/auth/static/favicon.svg +11 -0
  83. webscout/auth/swagger_ui.py +203 -0
  84. webscout/auth/templates/components/authentication.html +237 -0
  85. webscout/auth/templates/components/base.html +103 -0
  86. webscout/auth/templates/components/endpoints.html +750 -0
  87. webscout/auth/templates/components/examples.html +491 -0
  88. webscout/auth/templates/components/footer.html +75 -0
  89. webscout/auth/templates/components/header.html +27 -0
  90. webscout/auth/templates/components/models.html +286 -0
  91. webscout/auth/templates/components/navigation.html +70 -0
  92. webscout/auth/templates/static/api.js +455 -0
  93. webscout/auth/templates/static/icons.js +168 -0
  94. webscout/auth/templates/static/main.js +784 -0
  95. webscout/auth/templates/static/particles.js +201 -0
  96. webscout/auth/templates/static/styles.css +3353 -0
  97. webscout/auth/templates/static/ui.js +374 -0
  98. webscout/auth/templates/swagger_ui.html +170 -0
  99. webscout/client.py +49 -3
  100. webscout/litagent/Readme.md +12 -3
  101. webscout/litagent/agent.py +99 -62
  102. webscout/scout/core/scout.py +104 -26
  103. webscout/scout/element.py +139 -18
  104. webscout/swiftcli/core/cli.py +14 -3
  105. webscout/swiftcli/decorators/output.py +59 -9
  106. webscout/update_checker.py +31 -49
  107. webscout/version.py +1 -1
  108. webscout/webscout_search.py +4 -12
  109. webscout/webscout_search_async.py +3 -10
  110. webscout/yep_search.py +2 -11
  111. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/METADATA +41 -11
  112. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/RECORD +116 -68
  113. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/entry_points.txt +1 -1
  114. webscout/Provider/HF_space/__init__.py +0 -0
  115. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  116. webscout/Provider/OPENAI/api.py +0 -1035
  117. webscout/Provider/TTI/artbit.py +0 -0
  118. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/WHEEL +0 -0
  119. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/licenses/LICENSE.md +0 -0
  120. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/README_AUTOPROXY.md (new file)
@@ -0,0 +1,238 @@

# WebScout Auto-Proxy System

The WebScout Auto-Proxy system provides automatic proxy injection for all OpenAI-compatible providers. This system fetches proxies from a remote source and automatically configures them for HTTP sessions.

## Features

- **Automatic Proxy Injection**: All OpenAI-compatible providers automatically get proxy support
- **Multiple HTTP Client Support**: Works with `requests`, `httpx`, and `curl_cffi`
- **Proxy Pool Management**: Automatically fetches and caches proxies from a remote source
- **Working Proxy Detection**: Tests proxies to find working ones
- **Easy Disable Option**: Can be disabled per provider instance or globally

## How It Works

The system uses a metaclass (`ProxyAutoMeta`) that automatically:

1. Fetches proxies from `http://207.180.209.185:5000/ips.txt`
2. Caches proxies for 5 minutes to avoid excessive requests
3. Randomly selects a proxy for each provider instance
4. Patches existing HTTP session objects with proxy configuration
5. Provides helper methods for creating proxied sessions
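For orientation, steps 1 and 2 boil down to a fetch-and-cache routine along these lines (an illustrative sketch only; `fetch_proxies` and `_cache` are made-up names, not the actual `autoproxy` internals):

```python
import time
import requests

PROXY_SOURCE = "http://207.180.209.185:5000/ips.txt"
_cache = {"proxies": [], "fetched_at": 0.0}

def fetch_proxies(cache_duration: float = 300.0) -> list:
    """Return the cached proxy list, refreshing from the remote source after 5 minutes."""
    now = time.time()
    if not _cache["proxies"] or now - _cache["fetched_at"] > cache_duration:
        resp = requests.get(PROXY_SOURCE, timeout=10)
        resp.raise_for_status()
        _cache["proxies"] = [line.strip() for line in resp.text.splitlines() if line.strip()]
        _cache["fetched_at"] = now
    return _cache["proxies"]
```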
## Usage

### Automatic Usage (Default)

All OpenAI-compatible providers automatically get proxy support:

```python
from webscout.Provider.OPENAI.yep import YEPCHAT

# Proxy is automatically configured
client = YEPCHAT()

# All requests will use the configured proxy
response = client.chat.completions.create(
    model="DeepSeek-R1-Distill-Qwen-32B",
    messages=[{"role": "user", "content": "Hello!"}]
)
```

### Disabling Auto-Proxy

You can disable automatic proxy injection:

```python
# Disable for a specific instance
client = YEPCHAT(disable_auto_proxy=True)

# Or set a class attribute to disable for all instances
class MyProvider(OpenAICompatibleProvider):
    DISABLE_AUTO_PROXY = True
```

### Manual Proxy Configuration

You can also provide your own proxies:

```python
custom_proxies = {
    'http': 'http://user:pass@proxy.example.com:8080',
    'https': 'http://user:pass@proxy.example.com:8080'
}

client = YEPCHAT(proxies=custom_proxies)
```

### Using Helper Methods

Each provider instance gets helper methods for creating proxied sessions:

```python
client = YEPCHAT()

# Get a requests.Session with proxies configured
session = client.get_proxied_session()

# Get a curl_cffi Session with proxies configured
curl_session = client.get_proxied_curl_session(impersonate="chrome120")

# Get an httpx.Client with proxies configured (if httpx is installed)
httpx_client = client.get_proxied_httpx_client()
```
## Direct API Usage

You can also use the proxy functions directly:

```python
from webscout.Provider.OPENAI.autoproxy import (
    get_auto_proxy,
    get_proxy_dict,
    get_working_proxy,
    test_proxy,
    get_proxy_stats
)

# Get a random proxy
proxy = get_auto_proxy()

# Get proxy in dictionary format
proxy_dict = get_proxy_dict()

# Find a working proxy (tests multiple proxies)
working_proxy = get_working_proxy(max_attempts=5)

# Test if a proxy is working
is_working = test_proxy(proxy)

# Get proxy cache statistics
stats = get_proxy_stats()
```

## Proxy Format

The system expects proxies in the format:
```
http://username:password@host:port
```

Example:
```
http://fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==:fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==@190.103.177.163:80
```
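Turning one such line into the dictionary shape used by the providers (see the manual proxy configuration example above) is a one-liner; a hypothetical helper for illustration:

```python
def proxy_line_to_dict(line: str) -> dict:
    """Map a single ips.txt line onto a requests-style proxies mapping."""
    url = line.strip()
    return {"http": url, "https": url}
```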
## Configuration

### Cache Duration

You can adjust the proxy cache duration:

```python
from webscout.Provider.OPENAI.autoproxy import set_proxy_cache_duration

# Set cache to 10 minutes
set_proxy_cache_duration(600)
```

### Force Refresh

You can force refresh the proxy cache:

```python
from webscout.Provider.OPENAI.autoproxy import refresh_proxy_cache

# Force refresh and get number of proxies loaded
count = refresh_proxy_cache()
print(f"Loaded {count} proxies")
```
## Error Handling

The system gracefully handles errors:

- If proxy fetching fails, providers work without proxies
- If a proxy test fails, the system tries other proxies
- If no working proxy is found, providers fall back to direct connections

## Logging

The system uses Python's logging module. To see proxy-related logs:

```python
import logging
logging.basicConfig(level=logging.INFO)

# Or specifically for the autoproxy module
logger = logging.getLogger('webscout.Provider.OPENAI.autoproxy')
logger.setLevel(logging.DEBUG)
```

## Testing

Run the test suite to verify functionality:

```bash
python webscout/Provider/OPENAI/test_autoproxy.py
```

## Implementation Details

### ProxyAutoMeta Metaclass

The `ProxyAutoMeta` metaclass is applied to `OpenAICompatibleProvider` and:

1. Intercepts class instantiation
2. Checks for `disable_auto_proxy` parameter or class attribute
3. Fetches and configures proxies if not disabled
4. Patches existing session objects
5. Adds helper methods to the instance
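As a rough illustration of those five steps, a stripped-down metaclass could look like this (a sketch only, not the actual `ProxyAutoMeta` implementation; it omits session patching and helper injection):

```python
from webscout.Provider.OPENAI.autoproxy import get_proxy_dict

class ProxyAutoMetaSketch(type):
    """Toy metaclass: attach a proxies dict to each new instance unless disabled."""
    def __call__(cls, *args, **kwargs):
        disable = kwargs.get("disable_auto_proxy", False) or getattr(cls, "DISABLE_AUTO_PROXY", False)
        instance = super().__call__(*args, **kwargs)
        if not disable and not getattr(instance, "proxies", None):
            # get_proxy_dict() returns an {"http": ..., "https": ...} mapping (see Direct API Usage)
            instance.proxies = get_proxy_dict()
        return instance
```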
### Session Patching

The system automatically patches these session types:
- `requests.Session` - Updates the `proxies` attribute
- `httpx.Client` - Sets the `_proxies` attribute
- `curl_cffi.Session` - Updates the `proxies` attribute
- `curl_cffi.AsyncSession` - Updates the `proxies` attribute
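For a plain `requests.Session`, for example, the patching in the first bullet amounts to merging the proxy mapping into the session's `proxies` dict (a sketch, not the library's actual code):

```python
import requests

def patch_requests_session(session: requests.Session, proxies: dict) -> requests.Session:
    """Merge a proxy mapping into an existing session, as described above."""
    session.proxies.update(proxies or {})
    return session

session = patch_requests_session(requests.Session(),
                                 {"http": "http://user:pass@host:80",
                                  "https": "http://user:pass@host:80"})
```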
### Proxy Source

Proxies are fetched from: `http://207.180.209.185:5000/ips.txt`

The system expects one proxy per line in the format shown above.

## Troubleshooting

### No Proxies Available

If you see "No proxies available" messages:
1. Check if the proxy source URL is accessible
2. Verify your internet connection
3. Check if the proxy format is correct
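A quick manual check for point 1 is to probe the proxy source directly (plain `requests`, not a webscout API):

```python
import requests

resp = requests.get("http://207.180.209.185:5000/ips.txt", timeout=10)
print(resp.status_code, "-", len(resp.text.splitlines()), "proxies listed")
```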
### Proxy Test Failures

If proxy tests fail:
1. Some proxies may be temporarily unavailable (normal)
2. The test URL (`https://httpbin.org/ip`) may be blocked
3. There may be network connectivity issues

### Provider Not Getting Proxies

If a provider doesn't get automatic proxies:
1. Ensure it inherits from `OpenAICompatibleProvider`
2. Check if `disable_auto_proxy` is set
3. Verify the metaclass is properly imported

## Contributing

To add proxy support to a new provider:

1. Inherit from `OpenAICompatibleProvider`
2. Accept a `disable_auto_proxy` parameter in `__init__`
3. Use `self.proxies` for HTTP requests
4. Optionally use helper methods like `self.get_proxied_session()`

The metaclass will handle the rest automatically!
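A bare-bones skeleton of such a provider might look like the following (hypothetical `MyProvider`; only the proxy-related plumbing is shown, and the chat/completions interface expected by `OpenAICompatibleProvider` is omitted):

```python
import requests
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider

class MyProvider(OpenAICompatibleProvider):
    """Hypothetical provider that relies on ProxyAutoMeta for proxy injection."""

    def __init__(self, disable_auto_proxy: bool = False, **kwargs):
        # Step 2: accept the flag so the metaclass can honour it.
        self.session = requests.Session()
        # Steps 3-4: self.proxies and self.get_proxied_session() are expected to be
        # injected automatically unless disable_auto_proxy is True.

    def ping(self) -> int:
        # Step 3: route outgoing requests through whatever proxies were injected.
        resp = self.session.get("https://httpbin.org/ip",
                                proxies=getattr(self, "proxies", None),
                                timeout=30)
        return resp.status_code
```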
webscout/Provider/OPENAI/TogetherAI.py (new file)
@@ -0,0 +1,355 @@

```python
from typing import List, Dict, Optional, Union, Generator, Any

from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from webscout.Provider.OPENAI.utils import (
    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
    ChatCompletionMessage, CompletionUsage, count_tokens
)

import requests
import uuid
import time
import json
from webscout.litagent import LitAgent

class Completions(BaseCompletions):
    def __init__(self, client: 'TogetherAI'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        stop: Optional[Union[str, List[str]]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create
        """
        # Get API key if not already set
        if not self._client.headers.get("Authorization"):
            api_key = self._client.get_activation_key()
            self._client.headers["Authorization"] = f"Bearer {api_key}"
            self._client.session.headers.update(self._client.headers)

        model_name = self._client.convert_model_name(model)
        payload = {
            "model": model_name,
            "messages": messages,
            "stream": stream,
        }
        if max_tokens is not None:
            payload["max_tokens"] = max_tokens
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p
        if stop is not None:
            payload["stop"] = stop
        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model_name, payload, timeout, proxies)
        else:
            return self._create_non_stream(request_id, created_time, model_name, payload, timeout, proxies)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies
            )
            response.raise_for_status()
            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
            completion_tokens = 0
            total_tokens = prompt_tokens

            for line in response.iter_lines():
                if line:
                    line = line.decode('utf-8')
                    if line.startswith('data: '):
                        line = line[6:]
                    if line.strip() == '[DONE]':
                        break
                    try:
                        chunk_data = json.loads(line)
                        if 'choices' in chunk_data and chunk_data['choices']:
                            delta = chunk_data['choices'][0].get('delta', {})
                            content = delta.get('content')
                            if content:
                                completion_tokens += count_tokens(content)
                                total_tokens = prompt_tokens + completion_tokens
                                choice_delta = ChoiceDelta(
                                    content=content,
                                    role=delta.get('role', 'assistant'),
                                    tool_calls=delta.get('tool_calls')
                                )
                                choice = Choice(
                                    index=0,
                                    delta=choice_delta,
                                    finish_reason=None,
                                    logprobs=None
                                )
                                chunk = ChatCompletionChunk(
                                    id=request_id,
                                    choices=[choice],
                                    created=created_time,
                                    model=model
                                )
                                chunk.usage = {
                                    "prompt_tokens": prompt_tokens,
                                    "completion_tokens": completion_tokens,
                                    "total_tokens": total_tokens,
                                    "estimated_cost": None
                                }
                                yield chunk
                    except Exception:
                        continue

            # Final chunk with finish_reason="stop"
            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model
            )
            chunk.usage = {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": total_tokens,
                "estimated_cost": None
            }
            yield chunk
        except Exception as e:
            raise IOError(f"TogetherAI stream request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        try:
            payload_copy = payload.copy()
            payload_copy["stream"] = False
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload_copy,
                timeout=timeout or self._client.timeout,
                proxies=proxies
            )
            response.raise_for_status()
            data = response.json()

            full_text = ""
            finish_reason = "stop"
            if 'choices' in data and data['choices']:
                full_text = data['choices'][0]['message']['content']
                finish_reason = data['choices'][0].get('finish_reason', 'stop')

            message = ChatCompletionMessage(
                role="assistant",
                content=full_text
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason=finish_reason
            )

            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
            completion_tokens = count_tokens(full_text)
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )

            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )
            return completion
        except Exception as e:
            raise IOError(f"TogetherAI non-stream request failed: {e}") from e

class Chat(BaseChat):
    def __init__(self, client: 'TogetherAI'):
        self.completions = Completions(client)


class TogetherAI(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for the TogetherAI API.
    """
    AVAILABLE_MODELS = [
        "Gryphe/MythoMax-L2-13b",
        "Gryphe/MythoMax-L2-13b-Lite",
        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
        "Qwen/QwQ-32B",
        "Qwen/Qwen2-72B-Instruct",
        "Qwen/Qwen2-VL-72B-Instruct",
        "Qwen/Qwen2.5-72B-Instruct-Turbo",
        "Qwen/Qwen2.5-7B-Instruct-Turbo",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen2.5-VL-72B-Instruct",
        "Qwen/Qwen3-235B-A22B-fp8",
        "Qwen/Qwen3-235B-A22B-fp8-tput",
        "Rrrr/meta-llama/Llama-3-70b-chat-hf-6f9ad551",
        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-03dc18e1",
        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-6c92f39d",
        "arcee-ai/arcee-blitz",
        "arcee-ai/caller",
        "arcee-ai/coder-large",
        "arcee-ai/maestro-reasoning",
        "arcee-ai/virtuoso-large",
        "arcee-ai/virtuoso-medium-v2",
        "arcee_ai/arcee-spotlight",
        "blackbox/meta-llama-3-1-8b",
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
        "deepseek-ai/DeepSeek-V3",
        "deepseek-ai/DeepSeek-V3-p-dp",
        "google/gemma-2-27b-it",
        "google/gemma-2b-it",
        "lgai/exaone-3-5-32b-instruct",
        "lgai/exaone-deep-32b",
        "marin-community/marin-8b-instruct",
        "meta-llama/Llama-3-70b-chat-hf",
        "meta-llama/Llama-3-8b-chat-hf",
        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
        "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
        "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        "meta-llama/Llama-4-Scout-17B-16E-Instruct",
        "meta-llama/Llama-Vision-Free",
        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        "mistralai/Mistral-7B-Instruct-v0.1",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "mistralai/Mistral-7B-Instruct-v0.3",
        "mistralai/Mistral-Small-24B-Instruct-2501",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
        "perplexity-ai/r1-1776",
        "roberizk@gmail.com/meta-llama/Llama-3-70b-chat-hf-26ee936b",
        "roberizk@gmail.com/meta-llama/Meta-Llama-3-70B-Instruct-6feb41f7",
        "roberizk@gmail.com/meta-llama/Meta-Llama-3-8B-Instruct-8ced8839",
        "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
        "scb10x/scb10x-llama3-1-typhoon2-8b-instruct",
        "togethercomputer/MoA-1",
        "togethercomputer/MoA-1-Turbo",
        "togethercomputer/Refuel-Llm-V2",
        "togethercomputer/Refuel-Llm-V2-Small",
    ]

    def __init__(self, browser: str = "chrome"):
        self.timeout = 60
        self.api_endpoint = "https://api.together.xyz/v1/chat/completions"
        self.activation_endpoint = "https://www.codegeneration.ai/activate-v2"
        self.session = requests.Session()
        self.headers = LitAgent().generate_fingerprint(browser=browser)
        self.session.headers.update(self.headers)
        self.chat = Chat(self)
        self._api_key_cache = None

    @property
    def models(self):
        class _ModelList:
            def list(inner_self):
                return TogetherAI.AVAILABLE_MODELS
        return _ModelList()

    def get_activation_key(self) -> str:
        """Get API key from activation endpoint"""
        if self._api_key_cache:
            return self._api_key_cache

        try:
            response = requests.get(
                self.activation_endpoint,
                headers={"Accept": "application/json"},
                timeout=30
            )
            response.raise_for_status()
            activation_data = response.json()
            self._api_key_cache = activation_data["openAIParams"]["apiKey"]
            return self._api_key_cache
        except Exception as e:
            raise Exception(f"Failed to get activation key: {e}")

    def convert_model_name(self, model: str) -> str:
        """Convert model name - returns model if valid, otherwise default"""
        if model in self.AVAILABLE_MODELS:
            return model

        # Default to first available model if not found
        return self.AVAILABLE_MODELS[0]


if __name__ == "__main__":
    from rich import print

    client = TogetherAI()
    messages = [
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm fine, thank you! How can I help you today?"},
        {"role": "user", "content": "Tell me a short joke."}
    ]

    # Non-streaming example
    response = client.chat.completions.create(
        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        messages=messages,
        max_tokens=50,
        stream=False
    )
    print("Non-streaming response:")
    print(response)

    # Streaming example
    print("\nStreaming response:")
    stream = client.chat.completions.create(
        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        messages=messages,
        max_tokens=50,
        stream=True
    )

    for chunk in stream:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")
    print()
```
webscout/Provider/OPENAI/__init__.py
@@ -39,4 +39,19 @@ from .TwoAI import *

```diff
  from .oivscode import * # Add OnRender provider
  from .Qwen3 import *
  from .FalconH1 import *
- from .PI import * # Add PI.ai provider
+ from .PI import * # Add PI.ai provider
+ from .TogetherAI import * # Add TogetherAI provider
+ from .xenai import * # Add XenAI provider
+ from .GeminiProxy import * # Add GeminiProxy provider
+
+ # Export auto-proxy functionality
+ from .autoproxy import (
+     get_auto_proxy,
+     get_proxy_dict,
+     get_working_proxy,
+     test_proxy,
+     get_proxy_stats,
+     refresh_proxy_cache,
+     set_proxy_cache_duration,
+     ProxyAutoMeta
+ )
```
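With these exports in place, the auto-proxy helpers become importable straight from the OPENAI provider package. A small usage sketch, assuming the behavior documented in README_AUTOPROXY.md:

```python
from webscout.Provider.OPENAI import get_working_proxy, get_proxy_stats

# Probe up to three proxies from the shared pool; behavior when none work is
# assumed to be a graceful fallback, per the README's error-handling notes.
proxy = get_working_proxy(max_attempts=3)
print("Selected proxy:", proxy)
print("Cache stats:", get_proxy_stats())
```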