webscout 8.3.1__py3-none-any.whl → 8.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only, and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout has been flagged as potentially problematic; consult the registry's advisory page for more details.
- webscout/AIutel.py +180 -78
- webscout/Bing_search.py +417 -0
- webscout/Extra/gguf.py +706 -177
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/genspark_search.py +7 -7
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/ExaChat.py +84 -58
- webscout/Provider/GeminiProxy.py +140 -0
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/MCPCore.py +78 -75
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +34 -51
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
- webscout/Provider/OPENAI/GeminiProxy.py +328 -0
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +32 -29
- webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +17 -1
- webscout/Provider/OPENAI/autoproxy.py +1067 -39
- webscout/Provider/OPENAI/base.py +17 -76
- webscout/Provider/OPENAI/deepinfra.py +42 -108
- webscout/Provider/OPENAI/e2b.py +0 -1
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/mcpcore.py +109 -70
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/scirachat.py +59 -51
- webscout/Provider/OPENAI/toolbaz.py +3 -9
- webscout/Provider/OPENAI/typegpt.py +1 -1
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OPENAI/xenai.py +514 -0
- webscout/Provider/OPENAI/yep.py +8 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/bing.py +231 -0
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TTS/speechma.py +45 -39
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +78 -70
- webscout/Provider/TogetherAI.py +350 -0
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/XenAI.py +324 -0
- webscout/Provider/__init__.py +10 -5
- webscout/Provider/deepseek_assistant.py +378 -0
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +159 -96
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +41 -46
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +33 -17
- webscout/Provider/yep.py +79 -96
- webscout/auth/__init__.py +55 -0
- webscout/auth/api_key_manager.py +189 -0
- webscout/auth/auth_system.py +100 -0
- webscout/auth/config.py +76 -0
- webscout/auth/database.py +400 -0
- webscout/auth/exceptions.py +67 -0
- webscout/auth/middleware.py +248 -0
- webscout/auth/models.py +130 -0
- webscout/auth/providers.py +279 -0
- webscout/auth/rate_limiter.py +254 -0
- webscout/auth/request_models.py +127 -0
- webscout/auth/request_processing.py +226 -0
- webscout/auth/routes.py +550 -0
- webscout/auth/schemas.py +103 -0
- webscout/auth/server.py +367 -0
- webscout/client.py +121 -70
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/scout/core/scout.py +104 -26
- webscout/scout/element.py +139 -18
- webscout/swiftcli/core/cli.py +14 -3
- webscout/swiftcli/decorators/output.py +59 -9
- webscout/update_checker.py +31 -49
- webscout/version.py +1 -1
- webscout/webscout_search.py +4 -12
- webscout/webscout_search_async.py +3 -10
- webscout/yep_search.py +2 -11
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/OPENAI/api.py +0 -1320
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
|
@@ -21,9 +21,9 @@
|
|
|
21
21
|
|
|
22
22
|
The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
|
|
23
23
|
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
24
|
+
- Use the same code structure across different AI providers
|
|
25
|
+
- Switch between providers without major code changes
|
|
26
|
+
- Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
|
|
27
27
|
|
|
28
28
|
## ⚙️ Available Providers
|
|
29
29
|
|
|
@@ -68,8 +68,11 @@ Currently, the following providers are implemented with OpenAI-compatible interf
|
|
|
68
68
|
- TogetherAI
|
|
69
69
|
- PiAI
|
|
70
70
|
- FalconH1
|
|
71
|
-
|
|
72
|
-
|
|
71
|
+
- XenAI
|
|
72
|
+
- GeminiProxy
|
|
73
|
+
- MonoChat
|
|
74
|
+
- Friendli
|
|
75
|
+
- MiniMax
|
|
73
76
|
|
|
74
77
|
## 💻 Usage Examples
|
|
75
78
|
|
|
@@ -907,17 +910,17 @@ All providers return responses that mimic the OpenAI API structure, ensuring com
|
|
|
907
910
|
|
|
908
911
|
The OpenAI-compatible providers are built on a modular architecture:
|
|
909
912
|
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
+
- `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
|
|
914
|
+
- `utils.py`: Provides data structures that mimic OpenAI's response format
|
|
915
|
+
- Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
|
|
913
916
|
|
|
914
917
|
This architecture makes it easy to add new providers while maintaining a consistent interface.
|
|
915
918
|
|
|
916
919
|
## 📝 Notes
|
|
917
920
|
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
+
- Some providers may require API keys for full functionality
|
|
922
|
+
- Not all OpenAI features are supported by all providers
|
|
923
|
+
- Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
|
|
921
924
|
|
|
922
925
|
## 🤝 Contributing
|
|
923
926
|
|
|
@@ -930,24 +933,24 @@ Want to add a new OpenAI-compatible provider? Follow these steps:
|
|
|
930
933
|
|
|
931
934
|
## 📚 Related Documentation
|
|
932
935
|
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
936
|
+
- [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
|
|
937
|
+
- [DeepInfra Documentation](https://deepinfra.com/docs)
|
|
938
|
+
- [Glider.so Website](https://glider.so/)
|
|
939
|
+
- [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
|
|
940
|
+
- [X0GPT Website](https://x0-gpt.devwtf.in/)
|
|
941
|
+
- [WiseCat Website](https://wise-cat-groq.vercel.app/)
|
|
942
|
+
- [Venice AI Website](https://venice.ai/)
|
|
943
|
+
- [ExaAI Website](https://o3minichat.exa.ai/)
|
|
944
|
+
- [TypeGPT Website](https://chat.typegpt.net/)
|
|
945
|
+
- [SciraChat Website](https://scira.ai/)
|
|
946
|
+
- [FreeAIChat Website](https://freeaichatplayground.com/)
|
|
947
|
+
- [LLMChatCo Website](https://llmchat.co/)
|
|
948
|
+
- [Yep.com Website](https://yep.com/)
|
|
949
|
+
- [HeckAI Website](https://heck.ai/)
|
|
950
|
+
- [SonusAI Website](https://chat.sonus.ai/)
|
|
951
|
+
- [ExaChat Website](https://exa-chat.vercel.app/)
|
|
952
|
+
- [Netwrck Website](https://netwrck.com/)
|
|
953
|
+
- [StandardInput Website](https://chat.standard-input.com/)
|
|
951
954
|
|
|
952
955
|
<div align="center">
|
|
953
956
|
<a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
|
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
# WebScout Auto-Proxy System
|
|
2
|
+
|
|
3
|
+
The WebScout Auto-Proxy system provides automatic proxy injection for all OpenAI-compatible providers. This system fetches proxies from a remote source and automatically configures them for HTTP sessions.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Automatic Proxy Injection**: All OpenAI-compatible providers automatically get proxy support
|
|
8
|
+
- **Multiple HTTP Client Support**: Works with `requests`, `httpx`, and `curl_cffi`
|
|
9
|
+
- **Proxy Pool Management**: Automatically fetches and caches proxies from remote source
|
|
10
|
+
- **Working Proxy Detection**: Tests proxies to find working ones
|
|
11
|
+
- **Easy Disable Option**: Can be disabled per provider instance or globally
|
|
12
|
+
|
|
13
|
+
## How It Works
|
|
14
|
+
|
|
15
|
+
The system uses a metaclass (`ProxyAutoMeta`) that automatically:
|
|
16
|
+
|
|
17
|
+
1. Fetches proxies from `http://207.180.209.185:5000/ips.txt`
|
|
18
|
+
2. Caches proxies for 5 minutes to avoid excessive requests
|
|
19
|
+
3. Randomly selects a proxy for each provider instance
|
|
20
|
+
4. Patches existing HTTP session objects with proxy configuration
|
|
21
|
+
5. Provides helper methods for creating proxied sessions
|
|
22
|
+
|
|
23
|
+
## Usage
|
|
24
|
+
|
|
25
|
+
### Automatic Usage (Default)
|
|
26
|
+
|
|
27
|
+
All OpenAI-compatible providers automatically get proxy support:
|
|
28
|
+
|
|
29
|
+
```python
|
|
30
|
+
from webscout.Provider.OPENAI.yep import YEPCHAT
|
|
31
|
+
|
|
32
|
+
# Proxy is automatically configured
|
|
33
|
+
client = YEPCHAT()
|
|
34
|
+
|
|
35
|
+
# All requests will use the configured proxy
|
|
36
|
+
response = client.chat.completions.create(
|
|
37
|
+
model="DeepSeek-R1-Distill-Qwen-32B",
|
|
38
|
+
messages=[{"role": "user", "content": "Hello!"}]
|
|
39
|
+
)
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
### Disabling Auto-Proxy
|
|
43
|
+
|
|
44
|
+
You can disable automatic proxy injection:
|
|
45
|
+
|
|
46
|
+
```python
|
|
47
|
+
# Disable for a specific instance
|
|
48
|
+
client = YEPCHAT(disable_auto_proxy=True)
|
|
49
|
+
|
|
50
|
+
# Or set a class attribute to disable for all instances
|
|
51
|
+
class MyProvider(OpenAICompatibleProvider):
|
|
52
|
+
DISABLE_AUTO_PROXY = True
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
### Manual Proxy Configuration
|
|
56
|
+
|
|
57
|
+
You can also provide your own proxies:
|
|
58
|
+
|
|
59
|
+
```python
|
|
60
|
+
custom_proxies = {
|
|
61
|
+
'http': 'http://user:pass@proxy.example.com:8080',
|
|
62
|
+
'https': 'http://user:pass@proxy.example.com:8080'
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
client = YEPCHAT(proxies=custom_proxies)
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
### Using Helper Methods
|
|
69
|
+
|
|
70
|
+
Each provider instance gets helper methods for creating proxied sessions:
|
|
71
|
+
|
|
72
|
+
```python
|
|
73
|
+
client = YEPCHAT()
|
|
74
|
+
|
|
75
|
+
# Get a requests.Session with proxies configured
|
|
76
|
+
session = client.get_proxied_session()
|
|
77
|
+
|
|
78
|
+
# Get a curl_cffi Session with proxies configured
|
|
79
|
+
curl_session = client.get_proxied_curl_session(impersonate="chrome120")
|
|
80
|
+
|
|
81
|
+
# Get an httpx.Client with proxies configured (if httpx is installed)
|
|
82
|
+
httpx_client = client.get_proxied_httpx_client()
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
## Direct API Usage
|
|
86
|
+
|
|
87
|
+
You can also use the proxy functions directly:
|
|
88
|
+
|
|
89
|
+
```python
|
|
90
|
+
from webscout.Provider.OPENAI.autoproxy import (
|
|
91
|
+
get_auto_proxy,
|
|
92
|
+
get_proxy_dict,
|
|
93
|
+
get_working_proxy,
|
|
94
|
+
test_proxy,
|
|
95
|
+
get_proxy_stats
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
# Get a random proxy
|
|
99
|
+
proxy = get_auto_proxy()
|
|
100
|
+
|
|
101
|
+
# Get proxy in dictionary format
|
|
102
|
+
proxy_dict = get_proxy_dict()
|
|
103
|
+
|
|
104
|
+
# Find a working proxy (tests multiple proxies)
|
|
105
|
+
working_proxy = get_working_proxy(max_attempts=5)
|
|
106
|
+
|
|
107
|
+
# Test if a proxy is working
|
|
108
|
+
is_working = test_proxy(proxy)
|
|
109
|
+
|
|
110
|
+
# Get proxy cache statistics
|
|
111
|
+
stats = get_proxy_stats()
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
## Proxy Format
|
|
115
|
+
|
|
116
|
+
The system expects proxies in the format:
|
|
117
|
+
```
|
|
118
|
+
http://username:password@host:port
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
Example:
|
|
122
|
+
```
|
|
123
|
+
http://fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==:fnXlN8NP6StpxZkxmNLyOt2MaVLQunpGC7K96j7R0KbnE5sU_2RdYRxaoy7P2yfqrD7Y8UFexv8kpTyK0LwkDQ==@190.103.177.163:80
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
## Configuration
|
|
127
|
+
|
|
128
|
+
### Cache Duration
|
|
129
|
+
|
|
130
|
+
You can adjust the proxy cache duration:
|
|
131
|
+
|
|
132
|
+
```python
|
|
133
|
+
from webscout.Provider.OPENAI.autoproxy import set_proxy_cache_duration
|
|
134
|
+
|
|
135
|
+
# Set cache to 10 minutes
|
|
136
|
+
set_proxy_cache_duration(600)
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
### Force Refresh
|
|
140
|
+
|
|
141
|
+
You can force refresh the proxy cache:
|
|
142
|
+
|
|
143
|
+
```python
|
|
144
|
+
from webscout.Provider.OPENAI.autoproxy import refresh_proxy_cache
|
|
145
|
+
|
|
146
|
+
# Force refresh and get number of proxies loaded
|
|
147
|
+
count = refresh_proxy_cache()
|
|
148
|
+
print(f"Loaded {count} proxies")
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
## Error Handling
|
|
152
|
+
|
|
153
|
+
The system gracefully handles errors:
|
|
154
|
+
|
|
155
|
+
- If proxy fetching fails, providers work without proxies
|
|
156
|
+
- If a proxy test fails, the system tries other proxies
|
|
157
|
+
- If no working proxy is found, providers fall back to direct connections
|
|
158
|
+
|
|
159
|
+
## Logging
|
|
160
|
+
|
|
161
|
+
The system uses Python's logging module. To see proxy-related logs:
|
|
162
|
+
|
|
163
|
+
```python
|
|
164
|
+
import logging
|
|
165
|
+
logging.basicConfig(level=logging.INFO)
|
|
166
|
+
|
|
167
|
+
# Or specifically for the autoproxy module
|
|
168
|
+
logger = logging.getLogger('webscout.Provider.OPENAI.autoproxy')
|
|
169
|
+
logger.setLevel(logging.DEBUG)
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
## Testing
|
|
173
|
+
|
|
174
|
+
Run the test suite to verify functionality:
|
|
175
|
+
|
|
176
|
+
```bash
|
|
177
|
+
python webscout/Provider/OPENAI/test_autoproxy.py
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
## Implementation Details
|
|
181
|
+
|
|
182
|
+
### ProxyAutoMeta Metaclass
|
|
183
|
+
|
|
184
|
+
The `ProxyAutoMeta` metaclass is applied to `OpenAICompatibleProvider` and:
|
|
185
|
+
|
|
186
|
+
1. Intercepts class instantiation
|
|
187
|
+
2. Checks for `disable_auto_proxy` parameter or class attribute
|
|
188
|
+
3. Fetches and configures proxies if not disabled
|
|
189
|
+
4. Patches existing session objects
|
|
190
|
+
5. Adds helper methods to the instance
|
|
191
|
+
|
|
192
|
+
### Session Patching
|
|
193
|
+
|
|
194
|
+
The system automatically patches these session types:
|
|
195
|
+
- `requests.Session` - Updates the `proxies` attribute
|
|
196
|
+
- `httpx.Client` - Sets the `_proxies` attribute
|
|
197
|
+
- `curl_cffi.Session` - Updates the `proxies` attribute
|
|
198
|
+
- `curl_cffi.AsyncSession` - Updates the `proxies` attribute
|
|
199
|
+
|
|
200
|
+
### Proxy Source
|
|
201
|
+
|
|
202
|
+
Proxies are fetched from: `http://207.180.209.185:5000/ips.txt`
|
|
203
|
+
|
|
204
|
+
The system expects one proxy per line in the format shown above.
|
|
205
|
+
|
|
206
|
+
## Troubleshooting
|
|
207
|
+
|
|
208
|
+
### No Proxies Available
|
|
209
|
+
|
|
210
|
+
If you see "No proxies available" messages:
|
|
211
|
+
1. Check if the proxy source URL is accessible
|
|
212
|
+
2. Verify your internet connection
|
|
213
|
+
3. Check if the proxy format is correct
|
|
214
|
+
|
|
215
|
+
### Proxy Test Failures
|
|
216
|
+
|
|
217
|
+
If proxy tests fail:
|
|
218
|
+
1. Some proxies may be temporarily unavailable (normal)
|
|
219
|
+
2. The test URL (`https://httpbin.org/ip`) may be blocked
|
|
220
|
+
3. Network connectivity issues
|
|
221
|
+
|
|
222
|
+
### Provider Not Getting Proxies
|
|
223
|
+
|
|
224
|
+
If a provider doesn't get automatic proxies:
|
|
225
|
+
1. Ensure it inherits from `OpenAICompatibleProvider`
|
|
226
|
+
2. Check if `disable_auto_proxy` is set
|
|
227
|
+
3. Verify the metaclass is properly imported
|
|
228
|
+
|
|
229
|
+
## Contributing
|
|
230
|
+
|
|
231
|
+
To add proxy support to a new provider:
|
|
232
|
+
|
|
233
|
+
1. Inherit from `OpenAICompatibleProvider`
|
|
234
|
+
2. Accept `disable_auto_proxy` parameter in `__init__`
|
|
235
|
+
3. Use `self.proxies` for HTTP requests
|
|
236
|
+
4. Optionally use helper methods like `self.get_proxied_session()`
|
|
237
|
+
|
|
238
|
+
The metaclass will handle the rest automatically!
|
|
@@ -208,21 +208,15 @@ class TogetherAI(OpenAICompatibleProvider):
|
|
|
208
208
|
OpenAI-compatible client for TogetherAI API.
|
|
209
209
|
"""
|
|
210
210
|
AVAILABLE_MODELS = [
|
|
211
|
-
"Gryphe/MythoMax-L2-13b",
|
|
212
|
-
"Gryphe/MythoMax-L2-13b-Lite",
|
|
213
211
|
"NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
|
|
214
212
|
"Qwen/QwQ-32B",
|
|
215
213
|
"Qwen/Qwen2-72B-Instruct",
|
|
216
214
|
"Qwen/Qwen2-VL-72B-Instruct",
|
|
217
215
|
"Qwen/Qwen2.5-72B-Instruct-Turbo",
|
|
218
216
|
"Qwen/Qwen2.5-7B-Instruct-Turbo",
|
|
219
|
-
"Qwen/Qwen2.5-Coder-32B-Instruct",
|
|
220
217
|
"Qwen/Qwen2.5-VL-72B-Instruct",
|
|
221
|
-
"Qwen/Qwen3-235B-A22B-fp8",
|
|
222
218
|
"Qwen/Qwen3-235B-A22B-fp8-tput",
|
|
223
|
-
"
|
|
224
|
-
"Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-03dc18e1",
|
|
225
|
-
"Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-6c92f39d",
|
|
219
|
+
"Salesforce/Llama-Rank-V1",
|
|
226
220
|
"arcee-ai/arcee-blitz",
|
|
227
221
|
"arcee-ai/caller",
|
|
228
222
|
"arcee-ai/coder-large",
|
|
@@ -237,13 +231,12 @@ class TogetherAI(OpenAICompatibleProvider):
|
|
|
237
231
|
"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
|
|
238
232
|
"deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
|
|
239
233
|
"deepseek-ai/DeepSeek-V3",
|
|
240
|
-
"deepseek-ai/DeepSeek-V3-p-dp",
|
|
241
234
|
"google/gemma-2-27b-it",
|
|
242
|
-
"google/gemma-2b-it",
|
|
243
235
|
"lgai/exaone-3-5-32b-instruct",
|
|
244
236
|
"lgai/exaone-deep-32b",
|
|
245
237
|
"marin-community/marin-8b-instruct",
|
|
246
|
-
"meta-llama
|
|
238
|
+
"meta-llama-llama-2-70b-hf",
|
|
239
|
+
"meta-llama/Llama-2-70b-hf",
|
|
247
240
|
"meta-llama/Llama-3-8b-chat-hf",
|
|
248
241
|
"meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
|
|
249
242
|
"meta-llama/Llama-3.2-3B-Instruct-Turbo",
|
|
@@ -265,14 +258,8 @@ class TogetherAI(OpenAICompatibleProvider):
|
|
|
265
258
|
"mistralai/Mixtral-8x7B-Instruct-v0.1",
|
|
266
259
|
"nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
|
|
267
260
|
"perplexity-ai/r1-1776",
|
|
268
|
-
"roberizk@gmail.com/meta-llama/Llama-3-70b-chat-hf-26ee936b",
|
|
269
|
-
"roberizk@gmail.com/meta-llama/Meta-Llama-3-70B-Instruct-6feb41f7",
|
|
270
|
-
"roberizk@gmail.com/meta-llama/Meta-Llama-3-8B-Instruct-8ced8839",
|
|
271
261
|
"scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
|
|
272
|
-
"scb10x/scb10x-
|
|
273
|
-
"togethercomputer/MoA-1",
|
|
274
|
-
"togethercomputer/MoA-1-Turbo",
|
|
275
|
-
"togethercomputer/Refuel-Llm-V2",
|
|
262
|
+
"scb10x/scb10x-typhoon-2-1-gemma3-12b",
|
|
276
263
|
"togethercomputer/Refuel-Llm-V2-Small",
|
|
277
264
|
]
|
|
278
265
|
|
|
@@ -40,4 +40,20 @@ from .oivscode import * # Add OnRender provider
|
|
|
40
40
|
from .Qwen3 import *
|
|
41
41
|
from .FalconH1 import *
|
|
42
42
|
from .PI import * # Add PI.ai provider
|
|
43
|
-
from .TogetherAI import * # Add TogetherAI provider
|
|
43
|
+
from .TogetherAI import * # Add TogetherAI provider
|
|
44
|
+
from .xenai import * # Add XenAI provider
|
|
45
|
+
from .GeminiProxy import * # Add GeminiProxy provider
|
|
46
|
+
from .friendli import *
|
|
47
|
+
from .monochat import *
|
|
48
|
+
from .MiniMax import * # Add MiniMaxAI provider
|
|
49
|
+
# Export auto-proxy functionality
|
|
50
|
+
from .autoproxy import (
|
|
51
|
+
get_auto_proxy,
|
|
52
|
+
get_proxy_dict,
|
|
53
|
+
get_working_proxy,
|
|
54
|
+
test_proxy,
|
|
55
|
+
get_proxy_stats,
|
|
56
|
+
refresh_proxy_cache,
|
|
57
|
+
set_proxy_cache_duration,
|
|
58
|
+
ProxyAutoMeta
|
|
59
|
+
)
|