webscout-8.3.2-py3-none-any.whl → webscout-8.3.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (117)
  1. webscout/AIutel.py +367 -41
  2. webscout/Bard.py +2 -22
  3. webscout/Bing_search.py +1 -2
  4. webscout/Provider/AISEARCH/__init__.py +1 -0
  5. webscout/Provider/AISEARCH/scira_search.py +24 -11
  6. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  7. webscout/Provider/Deepinfra.py +75 -57
  8. webscout/Provider/ExaChat.py +93 -63
  9. webscout/Provider/Flowith.py +1 -1
  10. webscout/Provider/FreeGemini.py +2 -2
  11. webscout/Provider/Gemini.py +3 -10
  12. webscout/Provider/GeminiProxy.py +31 -5
  13. webscout/Provider/HeckAI.py +85 -80
  14. webscout/Provider/Jadve.py +56 -50
  15. webscout/Provider/LambdaChat.py +39 -31
  16. webscout/Provider/MiniMax.py +207 -0
  17. webscout/Provider/Nemotron.py +41 -13
  18. webscout/Provider/Netwrck.py +39 -59
  19. webscout/Provider/OLLAMA.py +8 -9
  20. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  21. webscout/Provider/OPENAI/MiniMax.py +298 -0
  22. webscout/Provider/OPENAI/README.md +31 -30
  23. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  24. webscout/Provider/OPENAI/__init__.py +4 -2
  25. webscout/Provider/OPENAI/autoproxy.py +753 -18
  26. webscout/Provider/OPENAI/base.py +7 -76
  27. webscout/Provider/OPENAI/copilot.py +73 -26
  28. webscout/Provider/OPENAI/deepinfra.py +96 -132
  29. webscout/Provider/OPENAI/exachat.py +9 -5
  30. webscout/Provider/OPENAI/flowith.py +179 -166
  31. webscout/Provider/OPENAI/friendli.py +233 -0
  32. webscout/Provider/OPENAI/monochat.py +329 -0
  33. webscout/Provider/OPENAI/netwrck.py +4 -7
  34. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  35. webscout/Provider/OPENAI/qodo.py +630 -0
  36. webscout/Provider/OPENAI/scirachat.py +82 -49
  37. webscout/Provider/OPENAI/textpollinations.py +13 -12
  38. webscout/Provider/OPENAI/toolbaz.py +1 -0
  39. webscout/Provider/OPENAI/typegpt.py +4 -4
  40. webscout/Provider/OPENAI/utils.py +19 -42
  41. webscout/Provider/OPENAI/x0gpt.py +14 -2
  42. webscout/Provider/OpenGPT.py +54 -32
  43. webscout/Provider/PI.py +58 -84
  44. webscout/Provider/Qodo.py +454 -0
  45. webscout/Provider/StandardInput.py +32 -13
  46. webscout/Provider/TTI/README.md +9 -9
  47. webscout/Provider/TTI/__init__.py +2 -1
  48. webscout/Provider/TTI/aiarta.py +92 -78
  49. webscout/Provider/TTI/infip.py +212 -0
  50. webscout/Provider/TTI/monochat.py +220 -0
  51. webscout/Provider/TeachAnything.py +11 -3
  52. webscout/Provider/TextPollinationsAI.py +91 -82
  53. webscout/Provider/TogetherAI.py +32 -48
  54. webscout/Provider/Venice.py +37 -46
  55. webscout/Provider/VercelAI.py +27 -24
  56. webscout/Provider/WiseCat.py +35 -35
  57. webscout/Provider/WrDoChat.py +22 -26
  58. webscout/Provider/WritingMate.py +26 -22
  59. webscout/Provider/__init__.py +6 -6
  60. webscout/Provider/copilot.py +58 -61
  61. webscout/Provider/freeaichat.py +64 -55
  62. webscout/Provider/granite.py +48 -57
  63. webscout/Provider/koala.py +51 -39
  64. webscout/Provider/learnfastai.py +49 -64
  65. webscout/Provider/llmchat.py +79 -93
  66. webscout/Provider/llmchatco.py +63 -78
  67. webscout/Provider/monochat.py +275 -0
  68. webscout/Provider/multichat.py +51 -40
  69. webscout/Provider/oivscode.py +1 -1
  70. webscout/Provider/scira_chat.py +257 -104
  71. webscout/Provider/scnet.py +13 -13
  72. webscout/Provider/searchchat.py +13 -13
  73. webscout/Provider/sonus.py +12 -11
  74. webscout/Provider/toolbaz.py +25 -8
  75. webscout/Provider/turboseek.py +41 -42
  76. webscout/Provider/typefully.py +27 -12
  77. webscout/Provider/typegpt.py +43 -48
  78. webscout/Provider/uncovr.py +55 -90
  79. webscout/Provider/x0gpt.py +325 -299
  80. webscout/Provider/yep.py +79 -96
  81. webscout/__init__.py +7 -2
  82. webscout/auth/__init__.py +12 -1
  83. webscout/auth/providers.py +27 -5
  84. webscout/auth/routes.py +146 -105
  85. webscout/auth/server.py +367 -312
  86. webscout/client.py +121 -116
  87. webscout/litagent/Readme.md +68 -55
  88. webscout/litagent/agent.py +99 -9
  89. webscout/version.py +1 -1
  90. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
  91. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
  92. webscout/Provider/AI21.py +0 -177
  93. webscout/Provider/HuggingFaceChat.py +0 -469
  94. webscout/Provider/OPENAI/freeaichat.py +0 -363
  95. webscout/Provider/TTI/fastflux.py +0 -233
  96. webscout/Provider/Writecream.py +0 -246
  97. webscout/auth/static/favicon.svg +0 -11
  98. webscout/auth/swagger_ui.py +0 -203
  99. webscout/auth/templates/components/authentication.html +0 -237
  100. webscout/auth/templates/components/base.html +0 -103
  101. webscout/auth/templates/components/endpoints.html +0 -750
  102. webscout/auth/templates/components/examples.html +0 -491
  103. webscout/auth/templates/components/footer.html +0 -75
  104. webscout/auth/templates/components/header.html +0 -27
  105. webscout/auth/templates/components/models.html +0 -286
  106. webscout/auth/templates/components/navigation.html +0 -70
  107. webscout/auth/templates/static/api.js +0 -455
  108. webscout/auth/templates/static/icons.js +0 -168
  109. webscout/auth/templates/static/main.js +0 -784
  110. webscout/auth/templates/static/particles.js +0 -201
  111. webscout/auth/templates/static/styles.css +0 -3353
  112. webscout/auth/templates/static/ui.js +0 -374
  113. webscout/auth/templates/swagger_ui.html +0 -170
  114. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
  115. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
  116. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
  117. {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/MiniMax.py (new file)
@@ -0,0 +1,298 @@
+ import os
+ import requests
+ import json
+ import time
+ import uuid
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+ from webscout.Provider.OPENAI.utils import (
+     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+     ChatCompletionMessage, CompletionUsage, count_tokens
+ )
+
+ class Completions(BaseCompletions):
+     def __init__(self, client: 'MiniMax'):
+         self._client = client
+
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         timeout: Optional[int] = None,
+         proxies: Optional[Dict[str, str]] = None,
+         stop: Optional[Union[str, List[str]]] = None,
+         **kwargs: Any
+     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+         """
+         Creates a model response for the given chat conversation.
+         Mimics openai.chat.completions.create
+         """
+         api_key = self._client.api_key
+         if not api_key:
+             raise Exception("MINIMAX_API_KEY not set in environment.")
+         model_name = self._client.convert_model_name(model)
+         payload = {
+             "model": model_name,
+             "messages": messages,
+             "stream": stream,
+         }
+         if max_tokens is not None:
+             payload["max_tokens"] = max_tokens
+         if temperature is not None:
+             payload["temperature"] = temperature
+         if top_p is not None:
+             payload["top_p"] = top_p
+         if stop is not None:
+             payload["stop"] = stop
+         payload.update(kwargs)
+         request_id = f"chatcmpl-{uuid.uuid4()}"
+         created_time = int(time.time())
+         if stream:
+             return self._create_stream(request_id, created_time, model_name, payload, timeout, proxies)
+         else:
+             return self._create_non_stream(request_id, created_time, model_name, payload, timeout, proxies)
+
+     def _create_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> Generator[ChatCompletionChunk, None, None]:
+         try:
+             headers = {
+                 'Content-Type': 'application/json',
+                 'Authorization': f'Bearer {self._client.api_key}',
+             }
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=headers,
+                 data=json.dumps(payload),
+                 stream=True,
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies
+             )
+             response.raise_for_status()
+             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+             completion_tokens = 0
+             total_tokens = prompt_tokens
+             streaming_response = ""
+             last_content = ""
+             last_reasoning = ""
+             in_think = False
+             for line in response.iter_lines():
+                 if line:
+                     line = line.decode('utf-8')
+                     if line.startswith('data: '):
+                         line = line[6:]
+                     if line.strip() == '[DONE]':
+                         break
+                     try:
+                         chunk_data = json.loads(line)
+                         if 'choices' in chunk_data and chunk_data['choices']:
+                             choice_data = chunk_data['choices'][0]
+                             delta = choice_data.get('delta', {})
+                             content = delta.get('content')
+                             reasoning_content = delta.get('reasoning_content')
+                             finish_reason = choice_data.get('finish_reason')
+                             # Only yield <think> and reasoning_content if reasoning_content is not empty
+                             if reasoning_content and reasoning_content.strip() and reasoning_content != last_reasoning:
+                                 if not in_think:
+                                     yield ChatCompletionChunk(
+                                         id=request_id,
+                                         choices=[Choice(index=0, delta=ChoiceDelta(content='<think>\n\n', role=None, tool_calls=None), finish_reason=None, logprobs=None)],
+                                         created=created_time,
+                                         model=model
+                                     )
+                                     in_think = True
+                                 yield ChatCompletionChunk(
+                                     id=request_id,
+                                     choices=[Choice(index=0, delta=ChoiceDelta(content=reasoning_content, role=None, tool_calls=None), finish_reason=None, logprobs=None)],
+                                     created=created_time,
+                                     model=model
+                                 )
+                                 last_reasoning = reasoning_content
+                             # Only yield </think> if we were in <think> and now have new content
+                             if in_think and content and content.strip() and content != last_content:
+                                 yield ChatCompletionChunk(
+                                     id=request_id,
+                                     choices=[Choice(index=0, delta=ChoiceDelta(content='</think>\n\n', role=None, tool_calls=None), finish_reason=None, logprobs=None)],
+                                     created=created_time,
+                                     model=model
+                                 )
+                                 in_think = False
+                             # Only yield content if it is not empty
+                             if content and content.strip() and content != last_content:
+                                 completion_tokens += count_tokens(content)
+                                 total_tokens = prompt_tokens + completion_tokens
+                                 choice_delta = ChoiceDelta(
+                                     content=content,
+                                     role=delta.get('role', 'assistant'),
+                                     tool_calls=delta.get('tool_calls')
+                                 )
+                                 choice = Choice(
+                                     index=0,
+                                     delta=choice_delta,
+                                     finish_reason=finish_reason,
+                                     logprobs=None
+                                 )
+                                 chunk = ChatCompletionChunk(
+                                     id=request_id,
+                                     choices=[choice],
+                                     created=created_time,
+                                     model=model
+                                 )
+                                 chunk.usage = {
+                                     "prompt_tokens": prompt_tokens,
+                                     "completion_tokens": completion_tokens,
+                                     "total_tokens": total_tokens,
+                                     "estimated_cost": None
+                                 }
+                                 yield chunk
+                                 streaming_response += content
+                                 last_content = content
+                     except Exception:
+                         continue
+             # Final chunk with finish_reason="stop"
+             delta = ChoiceDelta(content=None, role=None, tool_calls=None)
+             choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
+             chunk = ChatCompletionChunk(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model
+             )
+             chunk.usage = {
+                 "prompt_tokens": prompt_tokens,
+                 "completion_tokens": completion_tokens,
+                 "total_tokens": total_tokens,
+                 "estimated_cost": None
+             }
+             yield chunk
+         except Exception as e:
+             raise IOError(f"MiniMax stream request failed: {e}") from e
+
+     def _create_non_stream(
+         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+     ) -> ChatCompletion:
+         try:
+             headers = {
+                 'Content-Type': 'application/json',
+                 'Authorization': f'Bearer {self._client.api_key}',
+             }
+             payload_copy = payload.copy()
+             payload_copy["stream"] = False
+             response = self._client.session.post(
+                 self._client.api_endpoint,
+                 headers=headers,
+                 data=json.dumps(payload_copy),
+                 timeout=timeout or self._client.timeout,
+                 proxies=proxies
+             )
+             response.raise_for_status()
+             data = response.json()
+             full_text = ""
+             finish_reason = "stop"
+             if 'choices' in data and data['choices']:
+                 choice_data = data['choices'][0]
+                 # MiniMax returns content in 'message' or directly in 'delta' for streaming
+                 reasoning_content = ""
+                 if 'message' in choice_data and choice_data['message']:
+                     full_text = choice_data['message'].get('content', '')
+                     reasoning_content = choice_data['message'].get('reasoning_content', '')
+                 elif 'delta' in choice_data and choice_data['delta']:
+                     full_text = choice_data['delta'].get('content', '')
+                     reasoning_content = choice_data['delta'].get('reasoning_content', '')
+                 finish_reason = choice_data.get('finish_reason', 'stop')
+                 # If both are present, concatenate with <think> ... </think>
+                 if reasoning_content and reasoning_content.strip():
+                     if full_text and full_text.strip():
+                         full_text = f"<think>\n\n{reasoning_content}</think>\n\n{full_text}"
+                     else:
+                         full_text = f"<think>\n\n{reasoning_content}</think>\n\n"
+             message = ChatCompletionMessage(
+                 role="assistant",
+                 content=full_text
+             )
+             choice = Choice(
+                 index=0,
+                 message=message,
+                 finish_reason=finish_reason
+             )
+             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+             completion_tokens = count_tokens(full_text)
+             usage = CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=prompt_tokens + completion_tokens
+             )
+             completion = ChatCompletion(
+                 id=request_id,
+                 choices=[choice],
+                 created=created_time,
+                 model=model,
+                 usage=usage,
+             )
+             return completion
+         except Exception as e:
+             raise IOError(f"MiniMax non-stream request failed: {e}") from e
+
+ class Chat(BaseChat):
+     def __init__(self, client: 'MiniMax'):
+         self.completions = Completions(client)
+
+ class MiniMax(OpenAICompatibleProvider):
+     """
+     OpenAI-compatible client for MiniMax API.
+     """
+     AVAILABLE_MODELS = [
+         "MiniMax-Reasoning-01"
+     ]
+     def __init__(self, timeout: int = 30):
+         self.timeout = timeout
+         self.api_endpoint = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"
+         self.session = requests.Session()
+         self.api_key = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJtbyBuaSIsIlVzZXJOYW1lIjoibW8gbmkiLCJBY2NvdW50IjoiIiwiU3ViamVjdElEIjoiMTg3NjIwMDY0ODA2NDYzNTI0MiIsIlBob25lIjoiIiwiR3JvdXBJRCI6IjE4NzYyMDA2NDgwNjA0NDA5MzgiLCJQYWdlTmFtZSI6IiIsIk1haWwiOiJuaW1vQHN1YnN1cC52aXAiLCJDcmVhdGVUaW1lIjoiMjAyNS0wMS0wNyAxMToyNzowNyIsIlRva2VuVHlwZSI6MSwiaXNzIjoibWluaW1heCJ9.Ge1ZnpFPUfXVdMini0P_qXbP_9VYwzXiffG9DsNQck4GtYEOs33LDeAiwrVsrrLZfvJ2icQZ4sRZS54wmPuWua_Dav6pYJty8ZtahmUX1IuhlUX5YErhhCRAIy3J1xB8FkLHLyylChuBHpkNz6O6BQLmPqmoa-cOYK9Qrc6IDeu8SX1iMzO9-MSkcWNvkvpCF2Pf9tekBVWNKMDK6IZoMEPbtkaPXdDyP6l0M0e2AlL_E0oM9exg3V-ohAi8OTPFyqM6dcd4TwF-b9DULxfIsRFw401mvIxcTDWa42u2LULewdATVRD2BthU65tuRqEiWeFWMvFlPj2soMze_QIiUA"
+         self.chat = Chat(self)
+
+     @property
+     def models(self):
+         class _ModelList:
+             def list(inner_self):
+                 return MiniMax.AVAILABLE_MODELS
+         return _ModelList()
+
+     def convert_model_name(self, model: str) -> str:
+         if model in self.AVAILABLE_MODELS:
+             return model
+         return self.AVAILABLE_MODELS[0]
+
+ if __name__ == "__main__":
+     from rich import print
+     client = MiniMax()
+     messages = [
+         {"role": "user", "content": "What is the capital of France?"}
+     ]
+     # Non-streaming example
+     response = client.chat.completions.create(
+         model="MiniMax-Reasoning-01",
+         messages=messages,
+         max_tokens=5000,
+         stream=False
+     )
+     print("Non-streaming response:")
+     print(response)
+     # Streaming example
+     print("\nStreaming response:")
+     stream = client.chat.completions.create(
+         model="MiniMax-Reasoning-01",
+         messages=messages,
+         max_tokens=5000,
+         stream=True
+     )
+     for chunk in stream:
+         if chunk.choices[0].delta and chunk.choices[0].delta.content:
+             print(chunk.choices[0].delta.content, end="")
+     print()
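The streaming path above interleaves the provider's `reasoning_content` into the ordinary content stream, bracketed by literal `<think>\n\n` and `</think>\n\n` marker chunks. A minimal consumer sketch that separates reasoning from the final answer, assuming the `MiniMax` class from this diff is importable at the path the package layout suggests:

```python
# Sketch: split MiniMax streaming output into reasoning and answer.
# The marker strings match the ones injected by _create_stream above.
from webscout.Provider.OPENAI.MiniMax import MiniMax

client = MiniMax()
stream = client.chat.completions.create(
    model="MiniMax-Reasoning-01",
    messages=[{"role": "user", "content": "Briefly: why is the sky blue?"}],
    stream=True,
)

reasoning, answer, in_think = [], [], False
for chunk in stream:
    delta = chunk.choices[0].delta
    if not delta or not delta.content:
        continue  # final chunk carries finish_reason="stop" and no content
    text = delta.content
    if text == "<think>\n\n":    # provider-injected marker: reasoning starts
        in_think = True
        continue
    if text == "</think>\n\n":   # marker: reasoning done, answer follows
        in_think = False
        continue
    (reasoning if in_think else answer).append(text)

print("reasoning:", "".join(reasoning))
print("answer:", "".join(answer))
```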
webscout/Provider/OPENAI/README.md
@@ -21,9 +21,9 @@
 
  The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
 
- * Use the same code structure across different AI providers
- * Switch between providers without major code changes
- * Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
+ - Use the same code structure across different AI providers
+ - Switch between providers without major code changes
+ - Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
 
  ## ⚙️ Available Providers
 
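Those bullets are the crux of the compatibility layer: every provider exposes the same `chat.completions.create` surface, so swapping providers is a one-line change. A minimal sketch using the MiniMax provider added in this release (the import assumes the star-export added to `__init__.py` later in this diff; other provider classes would swap in analogously but are not verified here):

```python
# Same call shape regardless of the backing provider.
from webscout.Provider.OPENAI import MiniMax  # swap in another provider class here

client = MiniMax()
response = client.chat.completions.create(
    model="MiniMax-Reasoning-01",
    messages=[{"role": "user", "content": "Say hi in one word."}],
)
print(response.choices[0].message.content)
```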
 
@@ -39,7 +39,6 @@ Currently, the following providers are implemented with OpenAI-compatible interf
  - TypeGPT
  - SciraChat
  - LLMChatCo
- - FreeAIChat
  - YEPCHAT
  - HeckAI
  - SonusAI
@@ -70,8 +69,10 @@ Currently, the following providers are implemented with OpenAI-compatible interf
  - FalconH1
  - XenAI
  - GeminiProxy
- ---
-
+ - MonoChat
+ - Friendli
+ - MiniMax
+ - QodoAI
 
  ## 💻 Usage Examples
 
@@ -909,17 +910,17 @@ All providers return responses that mimic the OpenAI API structure, ensuring com
 
  The OpenAI-compatible providers are built on a modular architecture:
 
- * `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
- * `utils.py`: Provides data structures that mimic OpenAI's response format
- * Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
+ - `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
+ - `utils.py`: Provides data structures that mimic OpenAI's response format
+ - Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
 
  This architecture makes it easy to add new providers while maintaining a consistent interface.
 
  ## 📝 Notes
 
- * Some providers may require API keys for full functionality
- * Not all OpenAI features are supported by all providers
- * Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
+ - Some providers may require API keys for full functionality
+ - Not all OpenAI features are supported by all providers
+ - Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
 
  ## 🤝 Contributing
 
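Given that `base.py`/`utils.py` split and the MiniMax implementation earlier in this diff, a new provider reduces to roughly the skeleton below. This is a hedged sketch: `ExampleProvider`, its model id, and its endpoint are placeholders, and `base.py` (reworked in this release) may require more of the interface than shown here.

```python
from typing import Any, Dict, Generator, List, Union

from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
from webscout.Provider.OPENAI.utils import ChatCompletion, ChatCompletionChunk

class Completions(BaseCompletions):
    def __init__(self, client: 'ExampleProvider'):
        self._client = client

    def create(self, *, model: str, messages: List[Dict[str, str]], stream: bool = False,
               **kwargs: Any) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        # Call the upstream API here and translate its JSON into the
        # ChatCompletion / ChatCompletionChunk types from utils.py.
        raise NotImplementedError

class Chat(BaseChat):
    def __init__(self, client: 'ExampleProvider'):
        self.completions = Completions(client)

class ExampleProvider(OpenAICompatibleProvider):  # placeholder name
    AVAILABLE_MODELS = ["example-model"]  # placeholder model id

    def __init__(self, timeout: int = 30):
        self.timeout = timeout
        self.api_endpoint = "https://example.invalid/v1/chat/completions"  # placeholder
        self.chat = Chat(self)
```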
 
@@ -932,24 +933,24 @@ Want to add a new OpenAI-compatible provider? Follow these steps:
 
  ## 📚 Related Documentation
 
- * [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
- * [DeepInfra Documentation](https://deepinfra.com/docs)
- * [Glider.so Website](https://glider.so/)
- * [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
- * [X0GPT Website](https://x0-gpt.devwtf.in/)
- * [WiseCat Website](https://wise-cat-groq.vercel.app/)
- * [Venice AI Website](https://venice.ai/)
- * [ExaAI Website](https://o3minichat.exa.ai/)
- * [TypeGPT Website](https://chat.typegpt.net/)
- * [SciraChat Website](https://scira.ai/)
- * [FreeAIChat Website](https://freeaichatplayground.com/)
- * [LLMChatCo Website](https://llmchat.co/)
- * [Yep.com Website](https://yep.com/)
- * [HeckAI Website](https://heck.ai/)
- * [SonusAI Website](https://chat.sonus.ai/)
- * [ExaChat Website](https://exa-chat.vercel.app/)
- * [Netwrck Website](https://netwrck.com/)
- * [StandardInput Website](https://chat.standard-input.com/)
+ - [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
+ - [DeepInfra Documentation](https://deepinfra.com/docs)
+ - [Glider.so Website](https://glider.so/)
+ - [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
+ - [X0GPT Website](https://x0-gpt.devwtf.in/)
+ - [WiseCat Website](https://wise-cat-groq.vercel.app/)
+ - [Venice AI Website](https://venice.ai/)
+ - [ExaAI Website](https://o3minichat.exa.ai/)
+ - [TypeGPT Website](https://chat.typegpt.net/)
+ - [SciraChat Website](https://scira.ai/)
+ - [FreeAIChat Website](https://freeaichatplayground.com/)
+ - [LLMChatCo Website](https://llmchat.co/)
+ - [Yep.com Website](https://yep.com/)
+ - [HeckAI Website](https://heck.ai/)
+ - [SonusAI Website](https://chat.sonus.ai/)
+ - [ExaChat Website](https://exa-chat.vercel.app/)
+ - [Netwrck Website](https://netwrck.com/)
+ - [StandardInput Website](https://chat.standard-input.com/)
 
  <div align="center">
    <a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
webscout/Provider/OPENAI/TogetherAI.py
@@ -208,21 +208,15 @@ class TogetherAI(OpenAICompatibleProvider):
      OpenAI-compatible client for TogetherAI API.
      """
      AVAILABLE_MODELS = [
-         "Gryphe/MythoMax-L2-13b",
-         "Gryphe/MythoMax-L2-13b-Lite",
          "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
          "Qwen/QwQ-32B",
          "Qwen/Qwen2-72B-Instruct",
          "Qwen/Qwen2-VL-72B-Instruct",
          "Qwen/Qwen2.5-72B-Instruct-Turbo",
          "Qwen/Qwen2.5-7B-Instruct-Turbo",
-         "Qwen/Qwen2.5-Coder-32B-Instruct",
          "Qwen/Qwen2.5-VL-72B-Instruct",
-         "Qwen/Qwen3-235B-A22B-fp8",
          "Qwen/Qwen3-235B-A22B-fp8-tput",
-         "Rrrr/meta-llama/Llama-3-70b-chat-hf-6f9ad551",
-         "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-03dc18e1",
-         "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-6c92f39d",
+         "Salesforce/Llama-Rank-V1",
          "arcee-ai/arcee-blitz",
          "arcee-ai/caller",
          "arcee-ai/coder-large",
@@ -237,13 +231,12 @@ class TogetherAI(OpenAICompatibleProvider):
          "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
          "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
          "deepseek-ai/DeepSeek-V3",
-         "deepseek-ai/DeepSeek-V3-p-dp",
          "google/gemma-2-27b-it",
-         "google/gemma-2b-it",
          "lgai/exaone-3-5-32b-instruct",
          "lgai/exaone-deep-32b",
          "marin-community/marin-8b-instruct",
-         "meta-llama/Llama-3-70b-chat-hf",
+         "meta-llama-llama-2-70b-hf",
+         "meta-llama/Llama-2-70b-hf",
          "meta-llama/Llama-3-8b-chat-hf",
          "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo",
@@ -265,14 +258,8 @@ class TogetherAI(OpenAICompatibleProvider):
          "mistralai/Mixtral-8x7B-Instruct-v0.1",
          "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
          "perplexity-ai/r1-1776",
-         "roberizk@gmail.com/meta-llama/Llama-3-70b-chat-hf-26ee936b",
-         "roberizk@gmail.com/meta-llama/Meta-Llama-3-70B-Instruct-6feb41f7",
-         "roberizk@gmail.com/meta-llama/Meta-Llama-3-8B-Instruct-8ced8839",
          "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
-         "scb10x/scb10x-llama3-1-typhoon2-8b-instruct",
-         "togethercomputer/MoA-1",
-         "togethercomputer/MoA-1-Turbo",
-         "togethercomputer/Refuel-Llm-V2",
+         "scb10x/scb10x-typhoon-2-1-gemma3-12b",
          "togethercomputer/Refuel-Llm-V2-Small",
      ]
 
webscout/Provider/OPENAI/__init__.py
@@ -8,7 +8,6 @@ from .venice import *
  from .exaai import *
  from .typegpt import *
  from .scirachat import *
- from .freeaichat import *
  from .llmchatco import *
  from .yep import * # Add YEPCHAT
  from .heckai import *
@@ -43,7 +42,10 @@ from .PI import * # Add PI.ai provider
  from .TogetherAI import * # Add TogetherAI provider
  from .xenai import * # Add XenAI provider
  from .GeminiProxy import * # Add GeminiProxy provider
-
+ from .friendli import *
+ from .monochat import *
+ from .MiniMax import * # Add MiniMaxAI provider
+ from .qodo import * # Add QodoAI provider
  # Export auto-proxy functionality
  from .autoproxy import (
      get_auto_proxy,
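The star-imports added above should make the new providers reachable straight from the package namespace. A quick smoke check, assuming the star-imports export the provider classes (class and model names taken from the MiniMax module earlier in this diff):

```python
# Verify the newly wired-up provider resolves from the package namespace.
from webscout.Provider.OPENAI import MiniMax

client = MiniMax()
print(client.models.list())  # expected: ['MiniMax-Reasoning-01']
```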