webscout 8.3.2__py3-none-any.whl → 8.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (94)
  1. webscout/AIutel.py +146 -37
  2. webscout/Bing_search.py +1 -2
  3. webscout/Provider/AISEARCH/__init__.py +1 -0
  4. webscout/Provider/AISEARCH/stellar_search.py +132 -0
  5. webscout/Provider/ExaChat.py +84 -58
  6. webscout/Provider/HeckAI.py +85 -80
  7. webscout/Provider/Jadve.py +56 -50
  8. webscout/Provider/MiniMax.py +207 -0
  9. webscout/Provider/Nemotron.py +41 -13
  10. webscout/Provider/Netwrck.py +34 -51
  11. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
  12. webscout/Provider/OPENAI/MiniMax.py +298 -0
  13. webscout/Provider/OPENAI/README.md +30 -29
  14. webscout/Provider/OPENAI/TogetherAI.py +4 -17
  15. webscout/Provider/OPENAI/__init__.py +3 -1
  16. webscout/Provider/OPENAI/autoproxy.py +752 -17
  17. webscout/Provider/OPENAI/base.py +7 -76
  18. webscout/Provider/OPENAI/deepinfra.py +42 -108
  19. webscout/Provider/OPENAI/flowith.py +179 -166
  20. webscout/Provider/OPENAI/friendli.py +233 -0
  21. webscout/Provider/OPENAI/monochat.py +329 -0
  22. webscout/Provider/OPENAI/pydantic_imports.py +1 -172
  23. webscout/Provider/OPENAI/toolbaz.py +1 -0
  24. webscout/Provider/OPENAI/typegpt.py +1 -1
  25. webscout/Provider/OPENAI/utils.py +19 -42
  26. webscout/Provider/OPENAI/x0gpt.py +14 -2
  27. webscout/Provider/OpenGPT.py +54 -32
  28. webscout/Provider/PI.py +58 -84
  29. webscout/Provider/StandardInput.py +32 -13
  30. webscout/Provider/TTI/README.md +9 -9
  31. webscout/Provider/TTI/__init__.py +2 -1
  32. webscout/Provider/TTI/aiarta.py +92 -78
  33. webscout/Provider/TTI/infip.py +212 -0
  34. webscout/Provider/TTI/monochat.py +220 -0
  35. webscout/Provider/TeachAnything.py +11 -3
  36. webscout/Provider/TextPollinationsAI.py +78 -70
  37. webscout/Provider/TogetherAI.py +32 -48
  38. webscout/Provider/Venice.py +37 -46
  39. webscout/Provider/VercelAI.py +27 -24
  40. webscout/Provider/WiseCat.py +35 -35
  41. webscout/Provider/WrDoChat.py +22 -26
  42. webscout/Provider/WritingMate.py +26 -22
  43. webscout/Provider/__init__.py +2 -2
  44. webscout/Provider/granite.py +48 -57
  45. webscout/Provider/koala.py +51 -39
  46. webscout/Provider/learnfastai.py +49 -64
  47. webscout/Provider/llmchat.py +79 -93
  48. webscout/Provider/llmchatco.py +63 -78
  49. webscout/Provider/multichat.py +51 -40
  50. webscout/Provider/oivscode.py +1 -1
  51. webscout/Provider/scira_chat.py +159 -96
  52. webscout/Provider/scnet.py +13 -13
  53. webscout/Provider/searchchat.py +13 -13
  54. webscout/Provider/sonus.py +12 -11
  55. webscout/Provider/toolbaz.py +25 -8
  56. webscout/Provider/turboseek.py +41 -42
  57. webscout/Provider/typefully.py +27 -12
  58. webscout/Provider/typegpt.py +41 -46
  59. webscout/Provider/uncovr.py +55 -90
  60. webscout/Provider/x0gpt.py +33 -17
  61. webscout/Provider/yep.py +79 -96
  62. webscout/auth/__init__.py +12 -1
  63. webscout/auth/providers.py +27 -5
  64. webscout/auth/routes.py +128 -104
  65. webscout/auth/server.py +367 -312
  66. webscout/client.py +121 -116
  67. webscout/litagent/Readme.md +68 -55
  68. webscout/litagent/agent.py +99 -9
  69. webscout/version.py +1 -1
  70. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
  71. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
  72. webscout/Provider/TTI/fastflux.py +0 -233
  73. webscout/Provider/Writecream.py +0 -246
  74. webscout/auth/static/favicon.svg +0 -11
  75. webscout/auth/swagger_ui.py +0 -203
  76. webscout/auth/templates/components/authentication.html +0 -237
  77. webscout/auth/templates/components/base.html +0 -103
  78. webscout/auth/templates/components/endpoints.html +0 -750
  79. webscout/auth/templates/components/examples.html +0 -491
  80. webscout/auth/templates/components/footer.html +0 -75
  81. webscout/auth/templates/components/header.html +0 -27
  82. webscout/auth/templates/components/models.html +0 -286
  83. webscout/auth/templates/components/navigation.html +0 -70
  84. webscout/auth/templates/static/api.js +0 -455
  85. webscout/auth/templates/static/icons.js +0 -168
  86. webscout/auth/templates/static/main.js +0 -784
  87. webscout/auth/templates/static/particles.js +0 -201
  88. webscout/auth/templates/static/styles.css +0 -3353
  89. webscout/auth/templates/static/ui.js +0 -374
  90. webscout/auth/templates/swagger_ui.html +0 -170
  91. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
  92. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
  93. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
  94. {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
@@ -110,7 +110,8 @@ class NEMOTRON(Provider):
     def _make_request(
         self,
         message: str,
-        stream: bool = False
+        stream: bool = False,
+        raw: bool = False
     ) -> Generator[str, None, None]:
         """Make request to NEMOTRON API."""
         payload = {
@@ -131,10 +132,26 @@ class NEMOTRON(Provider):
                     timeout=self.timeout
                 ) as response:
                     response.raise_for_status()
-                    yield from sanitize_stream(
-                        response.iter_content(chunk_size=1024),
-                        to_json=False,
-                    )
+                    buffer = ""
+                    chunk_size = 32
+                    for chunk in response.iter_content(chunk_size=chunk_size):
+                        if not chunk:
+                            continue
+                        text = chunk.decode(errors="ignore")
+                        buffer += text
+                        while len(buffer) >= chunk_size:
+                            out = buffer[:chunk_size]
+                            buffer = buffer[chunk_size:]
+                            if out.strip():
+                                if raw:
+                                    yield out
+                                else:
+                                    yield out
+                    if buffer.strip():
+                        if raw:
+                            yield buffer
+                        else:
+                            yield buffer
             else:
                 response = self.session.post(
                     self.url,
@@ -143,7 +160,10 @@ class NEMOTRON(Provider):
                     timeout=self.timeout
                 )
                 response.raise_for_status()
-                yield response.text
+                if raw:
+                    yield response.text
+                else:
+                    yield response.text
 
         except requests.exceptions.RequestException as e:
             raise exceptions.ProviderConnectionError(f"Connection error: {str(e)}")
@@ -167,13 +187,20 @@ class NEMOTRON(Provider):
                 raise ValueError(f"Optimizer is not one of {self.__available_optimizers}")
 
         def for_stream():
-            for text in self._make_request(conversation_prompt, stream=True):
-                yield {"text": text}
+            for text in self._make_request(conversation_prompt, stream=True, raw=raw):
+                if raw:
+                    yield text
+                else:
+                    yield {"text": text}
 
         def for_non_stream():
-            response_text = next(self._make_request(conversation_prompt, stream=False))
-            self.last_response = {"text": response_text}
-            return self.last_response
+            response_text = next(self._make_request(conversation_prompt, stream=False, raw=raw))
+            if raw:
+                self.last_response = response_text
+                return response_text
+            else:
+                self.last_response = {"text": response_text}
+                return self.last_response
 
         return for_stream() if stream else for_non_stream()
 
@@ -214,5 +241,6 @@ class NEMOTRON(Provider):
 if __name__ == "__main__":
     # Example usage
     nemotron = NEMOTRON()
-    response = nemotron.chat("Hello, how are you?", stream=False)
-    print(response)
+    response = nemotron.chat("write me about humans in points", stream=True)
+    for part in response:
+        print(part, end="", flush=True)
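Taken together, the Nemotron.py changes thread a new raw flag through the request path: raw=True yields bare text chunks, while the default keeps the {"text": ...} dict envelope. A minimal usage sketch, assuming webscout 8.3.3 is installed and that the provider's usual ask() signature carries the new flag (the method name and prompt are assumptions, not shown in this hunk):

    from webscout.Provider.Nemotron import NEMOTRON

    nemotron = NEMOTRON()

    # raw=True yields bare strings; raw=False (the default) yields {"text": ...} dicts
    for chunk in nemotron.ask("Summarize this diff", stream=True, raw=True):
        print(chunk, end="", flush=True)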
@@ -127,75 +127,58 @@ class Netwrck(Provider):
 
         def for_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://netwrck.com/api/chatpred_or",
                     json=payload,
-                    # headers are set on the session
-                    # proxies are set on the session
                     timeout=self.timeout,
                     stream=True,
-                    impersonate="chrome110" # Use a common impersonation profile
+                    impersonate="chrome110"
                 )
-                response.raise_for_status() # Check for HTTP errors
-
-                streaming_text = ""
-                # Use sanitize_stream
-                processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
-                    intro_value=None, # No prefix
-                    to_json=False, # It's text
-                    content_extractor=self._netwrck_extractor, # Use the quote stripper
-                    yield_raw_on_error=True
-                )
-                for content_chunk in processed_stream:
-                    if content_chunk and isinstance(content_chunk, str):
-                        streaming_text += content_chunk
-                        yield {"text": content_chunk} if not raw else content_chunk
-                # Update history after stream finishes
-                self.last_response = {"text": streaming_text} # Store aggregated text
-                self.conversation.update_chat_history(payload["query"], streaming_text)
-
-            except CurlError as e: # Catch CurlError
+                response.raise_for_status()
+                buffer = ""
+                chunk_size = 32
+                for chunk in response.iter_content(chunk_size=chunk_size):
+                    if not chunk:
+                        continue
+                    text = chunk.decode(errors="ignore")
+                    buffer += text
+                    while len(buffer) >= chunk_size:
+                        out = buffer[:chunk_size]
+                        buffer = buffer[chunk_size:]
+                        if out.strip():
+                            if raw:
+                                yield out
+                            else:
+                                yield {"text": out}
+                if buffer.strip():
+                    if raw:
+                        yield buffer
+                    else:
+                        yield {"text": buffer}
+                self.last_response = {"text": buffer}
+                self.conversation.update_chat_history(payload["query"], buffer)
+            except CurlError as e:
                 raise exceptions.ProviderConnectionError(f"Network error (CurlError): {str(e)}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.ProviderConnectionError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
 
         def for_non_stream():
             try:
-                # Use curl_cffi session post with impersonate
                 response = self.session.post(
                     "https://netwrck.com/api/chatpred_or",
                     json=payload,
-                    # headers are set on the session
-                    # proxies are set on the session
                     timeout=self.timeout,
-                    impersonate="chrome110" # Use a common impersonation profile
-                )
-                response.raise_for_status() # Check for HTTP errors
-
-                response_text_raw = response.text # Get raw text
-
-                # Process the text using sanitize_stream
-                processed_stream = sanitize_stream(
-                    data=response_text_raw,
-                    intro_value=None,
-                    to_json=False,
-                    content_extractor=self._netwrck_extractor
+                    impersonate="chrome110"
                 )
-                # Aggregate the single result
-                text = "".join(list(processed_stream))
-
-                self.last_response = {"text": text} # Store processed text
-                self.conversation.update_chat_history(prompt, text)
-
-                # Return dict or raw string
-                return text if raw else self.last_response
-
-            except CurlError as e: # Catch CurlError
+                response.raise_for_status()
+                response_text_raw = response.text
+                self.last_response = {"text": response_text_raw}
+                self.conversation.update_chat_history(prompt, response_text_raw)
+                return response_text_raw if raw else self.last_response
+            except CurlError as e:
                 raise exceptions.FailedToGenerateResponseError(f"Network error (CurlError): {str(e)}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError)
+            except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                 raise exceptions.FailedToGenerateResponseError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
 
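Netwrck.py drops its sanitize_stream pipeline for the same fixed-width rechunking loop that Nemotron.py adopts above. A self-contained sketch of that pattern, runnable as-is, with a stub generator standing in for response.iter_content:

    def rechunk(byte_iter, chunk_size=32):
        # Decode incoming bytes and re-emit them as fixed-width text chunks.
        buffer = ""
        for chunk in byte_iter:
            if not chunk:
                continue
            buffer += chunk.decode(errors="ignore")
            while len(buffer) >= chunk_size:
                out, buffer = buffer[:chunk_size], buffer[chunk_size:]
                if out.strip():
                    yield out
        if buffer.strip():
            yield buffer  # flush whatever is left in the tail

    # Stub stream standing in for an HTTP byte iterator
    sample = (part.encode() for part in ["Hello, ", "streaming ", "world!"])
    print(list(rechunk(sample)))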
@@ -1043,4 +1043,3 @@ if __name__ == "__main__":
     )
     for chunk in response:
         print(chunk.choices[0].delta.content, end='', flush=True)
-
@@ -0,0 +1,298 @@
+import os
+import requests
+import json
+import time
+import uuid
+from typing import List, Dict, Optional, Union, Generator, Any
+
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage, count_tokens
+)
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'MiniMax'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = None,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        stop: Optional[Union[str, List[str]]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        api_key = self._client.api_key
+        if not api_key:
+            raise Exception("MINIMAX_API_KEY not set in environment.")
+        model_name = self._client.convert_model_name(model)
+        payload = {
+            "model": model_name,
+            "messages": messages,
+            "stream": stream,
+        }
+        if max_tokens is not None:
+            payload["max_tokens"] = max_tokens
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+        if stop is not None:
+            payload["stop"] = stop
+        payload.update(kwargs)
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+        if stream:
+            return self._create_stream(request_id, created_time, model_name, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model_name, payload, timeout, proxies)
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            headers = {
+                'Content-Type': 'application/json',
+                'Authorization': f'Bearer {self._client.api_key}',
+            }
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=headers,
+                data=json.dumps(payload),
+                stream=True,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = 0
+            total_tokens = prompt_tokens
+            streaming_response = ""
+            last_content = ""
+            last_reasoning = ""
+            in_think = False
+            for line in response.iter_lines():
+                if line:
+                    line = line.decode('utf-8')
+                    if line.startswith('data: '):
+                        line = line[6:]
+                    if line.strip() == '[DONE]':
+                        break
+                    try:
+                        chunk_data = json.loads(line)
+                        if 'choices' in chunk_data and chunk_data['choices']:
+                            choice_data = chunk_data['choices'][0]
+                            delta = choice_data.get('delta', {})
+                            content = delta.get('content')
+                            reasoning_content = delta.get('reasoning_content')
+                            finish_reason = choice_data.get('finish_reason')
+                            # Only yield <think> and reasoning_content if reasoning_content is not empty
+                            if reasoning_content and reasoning_content.strip() and reasoning_content != last_reasoning:
+                                if not in_think:
+                                    yield ChatCompletionChunk(
+                                        id=request_id,
+                                        choices=[Choice(index=0, delta=ChoiceDelta(content='<think>\n\n', role=None, tool_calls=None), finish_reason=None, logprobs=None)],
+                                        created=created_time,
+                                        model=model
+                                    )
+                                    in_think = True
+                                yield ChatCompletionChunk(
+                                    id=request_id,
+                                    choices=[Choice(index=0, delta=ChoiceDelta(content=reasoning_content, role=None, tool_calls=None), finish_reason=None, logprobs=None)],
+                                    created=created_time,
+                                    model=model
+                                )
+                                last_reasoning = reasoning_content
+                            # Only yield </think> if we were in <think> and now have new content
+                            if in_think and content and content.strip() and content != last_content:
+                                yield ChatCompletionChunk(
+                                    id=request_id,
+                                    choices=[Choice(index=0, delta=ChoiceDelta(content='</think>\n\n', role=None, tool_calls=None), finish_reason=None, logprobs=None)],
+                                    created=created_time,
+                                    model=model
+                                )
+                                in_think = False
+                            # Only yield content if it is not empty
+                            if content and content.strip() and content != last_content:
+                                completion_tokens += count_tokens(content)
+                                total_tokens = prompt_tokens + completion_tokens
+                                choice_delta = ChoiceDelta(
+                                    content=content,
+                                    role=delta.get('role', 'assistant'),
+                                    tool_calls=delta.get('tool_calls')
+                                )
+                                choice = Choice(
+                                    index=0,
+                                    delta=choice_delta,
+                                    finish_reason=finish_reason,
+                                    logprobs=None
+                                )
+                                chunk = ChatCompletionChunk(
+                                    id=request_id,
+                                    choices=[choice],
+                                    created=created_time,
+                                    model=model
+                                )
+                                chunk.usage = {
+                                    "prompt_tokens": prompt_tokens,
+                                    "completion_tokens": completion_tokens,
+                                    "total_tokens": total_tokens,
+                                    "estimated_cost": None
+                                }
+                                yield chunk
+                                streaming_response += content
+                                last_content = content
+                    except Exception:
+                        continue
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
+        except Exception as e:
+            raise IOError(f"MiniMax stream request failed: {e}") from e
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            headers = {
+                'Content-Type': 'application/json',
+                'Authorization': f'Bearer {self._client.api_key}',
+            }
+            payload_copy = payload.copy()
+            payload_copy["stream"] = False
+            response = self._client.session.post(
+                self._client.api_endpoint,
+                headers=headers,
+                data=json.dumps(payload_copy),
+                timeout=timeout or self._client.timeout,
+                proxies=proxies
+            )
+            response.raise_for_status()
+            data = response.json()
+            full_text = ""
+            finish_reason = "stop"
+            if 'choices' in data and data['choices']:
+                choice_data = data['choices'][0]
+                # MiniMax returns content in 'message' or directly in 'delta' for streaming
+                reasoning_content = ""
+                if 'message' in choice_data and choice_data['message']:
+                    full_text = choice_data['message'].get('content', '')
+                    reasoning_content = choice_data['message'].get('reasoning_content', '')
+                elif 'delta' in choice_data and choice_data['delta']:
+                    full_text = choice_data['delta'].get('content', '')
+                    reasoning_content = choice_data['delta'].get('reasoning_content', '')
+                finish_reason = choice_data.get('finish_reason', 'stop')
+                # If both are present, concatenate with <think> ... </think>
+                if reasoning_content and reasoning_content.strip():
+                    if full_text and full_text.strip():
+                        full_text = f"<think>\n\n{reasoning_content}</think>\n\n{full_text}"
+                    else:
+                        full_text = f"<think>\n\n{reasoning_content}</think>\n\n"
+            message = ChatCompletionMessage(
+                role="assistant",
+                content=full_text
+            )
+            choice = Choice(
+                index=0,
+                message=message,
+                finish_reason=finish_reason
+            )
+            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
+            completion_tokens = count_tokens(full_text)
+            usage = CompletionUsage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=prompt_tokens + completion_tokens
+            )
+            completion = ChatCompletion(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                usage=usage,
+            )
+            return completion
+        except Exception as e:
+            raise IOError(f"MiniMax non-stream request failed: {e}") from e
+
+class Chat(BaseChat):
+    def __init__(self, client: 'MiniMax'):
+        self.completions = Completions(client)
+
+class MiniMax(OpenAICompatibleProvider):
+    """
+    OpenAI-compatible client for MiniMax API.
+    """
+    AVAILABLE_MODELS = [
+        "MiniMax-Reasoning-01"
+    ]
+    def __init__(self, timeout: int = 30):
+        self.timeout = timeout
+        self.api_endpoint = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"
+        self.session = requests.Session()
+        self.api_key = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJtbyBuaSIsIlVzZXJOYW1lIjoibW8gbmkiLCJBY2NvdW50IjoiIiwiU3ViamVjdElEIjoiMTg3NjIwMDY0ODA2NDYzNTI0MiIsIlBob25lIjoiIiwiR3JvdXBJRCI6IjE4NzYyMDA2NDgwNjA0NDA5MzgiLCJQYWdlTmFtZSI6IiIsIk1haWwiOiJuaW1vQHN1YnN1cC52aXAiLCJDcmVhdGVUaW1lIjoiMjAyNS0wMS0wNyAxMToyNzowNyIsIlRva2VuVHlwZSI6MSwiaXNzIjoibWluaW1heCJ9.Ge1ZnpFPUfXVdMini0P_qXbP_9VYwzXiffG9DsNQck4GtYEOs33LDeAiwrVsrrLZfvJ2icQZ4sRZS54wmPuWua_Dav6pYJty8ZtahmUX1IuhlUX5YErhhCRAIy3J1xB8FkLHLyylChuBHpkNz6O6BQLmPqmoa-cOYK9Qrc6IDeu8SX1iMzO9-MSkcWNvkvpCF2Pf9tekBVWNKMDK6IZoMEPbtkaPXdDyP6l0M0e2AlL_E0oM9exg3V-ohAi8OTPFyqM6dcd4TwF-b9DULxfIsRFw401mvIxcTDWa42u2LULewdATVRD2BthU65tuRqEiWeFWMvFlPj2soMze_QIiUA"
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return MiniMax.AVAILABLE_MODELS
+        return _ModelList()
+
+    def convert_model_name(self, model: str) -> str:
+        if model in self.AVAILABLE_MODELS:
+            return model
+        return self.AVAILABLE_MODELS[0]
+
+if __name__ == "__main__":
+    from rich import print
+    client = MiniMax()
+    messages = [
+        {"role": "user", "content": "What is the capital of France?"}
+    ]
+    # Non-streaming example
+    response = client.chat.completions.create(
+        model="MiniMax-Reasoning-01",
+        messages=messages,
+        max_tokens=5000,
+        stream=False
+    )
+    print("Non-streaming response:")
+    print(response)
+    # Streaming example
+    print("\nStreaming response:")
+    stream = client.chat.completions.create(
+        model="MiniMax-Reasoning-01",
+        messages=messages,
+        max_tokens=5000,
+        stream=True
+    )
+    for chunk in stream:
+        if chunk.choices[0].delta and chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end="")
+    print()
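Because the new OPENAI/MiniMax.py client folds reasoning_content into the message text as <think>...</think> markers, callers that only want the final answer have to split them back out. A hypothetical helper, not part of the package; the marker framing matches the f-strings in the diff above:

    import re

    def split_reasoning(text):
        # Returns (reasoning, answer) given the "<think>\n\n...</think>\n\n" framing.
        match = re.search(r"<think>\n\n(.*?)</think>\n\n", text, flags=re.DOTALL)
        if match:
            return match.group(1), text[match.end():]
        return "", text

    # e.g. with the non-streaming response from the example above:
    reasoning, answer = split_reasoning(response.choices[0].message.content)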
@@ -21,9 +21,9 @@
 
 The WebScout OpenAI-Compatible Providers module offers a standardized way to interact with various AI providers using the familiar OpenAI API structure. This makes it easy to:
 
-* Use the same code structure across different AI providers
-* Switch between providers without major code changes
-* Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
+- Use the same code structure across different AI providers
+- Switch between providers without major code changes
+- Leverage the OpenAI ecosystem of tools and libraries with alternative AI providers
 
 ## ⚙️ Available Providers
 
@@ -70,8 +70,9 @@ Currently, the following providers are implemented with OpenAI-compatible interf
 - FalconH1
 - XenAI
 - GeminiProxy
----
-
+- MonoChat
+- Friendli
+- MiniMax
 
 ## 💻 Usage Examples
 
@@ -909,17 +910,17 @@ All providers return responses that mimic the OpenAI API structure, ensuring com
 
 The OpenAI-compatible providers are built on a modular architecture:
 
-* `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
-* `utils.py`: Provides data structures that mimic OpenAI's response format
-* Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
+- `base.py`: Contains abstract base classes that define the OpenAI-compatible interface
+- `utils.py`: Provides data structures that mimic OpenAI's response format
+- Provider-specific implementations (e.g., `deepinfra.py`): Implement the abstract interfaces for specific providers
 
 This architecture makes it easy to add new providers while maintaining a consistent interface.
 
 ## 📝 Notes
 
-* Some providers may require API keys for full functionality
-* Not all OpenAI features are supported by all providers
-* Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
+- Some providers may require API keys for full functionality
+- Not all OpenAI features are supported by all providers
+- Response formats are standardized to match OpenAI's format, but the underlying content depends on the specific provider and model
 
 ## 🤝 Contributing
 
@@ -932,24 +933,24 @@ Want to add a new OpenAI-compatible provider? Follow these steps:
 
 ## 📚 Related Documentation
 
-* [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
-* [DeepInfra Documentation](https://deepinfra.com/docs)
-* [Glider.so Website](https://glider.so/)
-* [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
-* [X0GPT Website](https://x0-gpt.devwtf.in/)
-* [WiseCat Website](https://wise-cat-groq.vercel.app/)
-* [Venice AI Website](https://venice.ai/)
-* [ExaAI Website](https://o3minichat.exa.ai/)
-* [TypeGPT Website](https://chat.typegpt.net/)
-* [SciraChat Website](https://scira.ai/)
-* [FreeAIChat Website](https://freeaichatplayground.com/)
-* [LLMChatCo Website](https://llmchat.co/)
-* [Yep.com Website](https://yep.com/)
-* [HeckAI Website](https://heck.ai/)
-* [SonusAI Website](https://chat.sonus.ai/)
-* [ExaChat Website](https://exa-chat.vercel.app/)
-* [Netwrck Website](https://netwrck.com/)
-* [StandardInput Website](https://chat.standard-input.com/)
+- [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
+- [DeepInfra Documentation](https://deepinfra.com/docs)
+- [Glider.so Website](https://glider.so/)
+- [ChatGPT Clone Website](https://chatgpt-clone-ten-nu.vercel.app/)
+- [X0GPT Website](https://x0-gpt.devwtf.in/)
+- [WiseCat Website](https://wise-cat-groq.vercel.app/)
+- [Venice AI Website](https://venice.ai/)
+- [ExaAI Website](https://o3minichat.exa.ai/)
+- [TypeGPT Website](https://chat.typegpt.net/)
+- [SciraChat Website](https://scira.ai/)
+- [FreeAIChat Website](https://freeaichatplayground.com/)
+- [LLMChatCo Website](https://llmchat.co/)
+- [Yep.com Website](https://yep.com/)
+- [HeckAI Website](https://heck.ai/)
+- [SonusAI Website](https://chat.sonus.ai/)
+- [ExaChat Website](https://exa-chat.vercel.app/)
+- [Netwrck Website](https://netwrck.com/)
+- [StandardInput Website](https://chat.standard-input.com/)
 
 <div align="center">
   <a href="https://t.me/PyscoutAI"><img alt="Telegram Group" src="https://img.shields.io/badge/Telegram%20Group-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white"></a>
@@ -208,21 +208,15 @@ class TogetherAI(OpenAICompatibleProvider):
     OpenAI-compatible client for TogetherAI API.
     """
     AVAILABLE_MODELS = [
-        "Gryphe/MythoMax-L2-13b",
-        "Gryphe/MythoMax-L2-13b-Lite",
         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "Qwen/QwQ-32B",
         "Qwen/Qwen2-72B-Instruct",
         "Qwen/Qwen2-VL-72B-Instruct",
         "Qwen/Qwen2.5-72B-Instruct-Turbo",
         "Qwen/Qwen2.5-7B-Instruct-Turbo",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
         "Qwen/Qwen2.5-VL-72B-Instruct",
-        "Qwen/Qwen3-235B-A22B-fp8",
         "Qwen/Qwen3-235B-A22B-fp8-tput",
-        "Rrrr/meta-llama/Llama-3-70b-chat-hf-6f9ad551",
-        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-03dc18e1",
-        "Rrrr/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo-6c92f39d",
+        "Salesforce/Llama-Rank-V1",
         "arcee-ai/arcee-blitz",
         "arcee-ai/caller",
         "arcee-ai/coder-large",
@@ -237,13 +231,12 @@ class TogetherAI(OpenAICompatibleProvider):
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
         "deepseek-ai/DeepSeek-V3",
-        "deepseek-ai/DeepSeek-V3-p-dp",
         "google/gemma-2-27b-it",
-        "google/gemma-2b-it",
         "lgai/exaone-3-5-32b-instruct",
         "lgai/exaone-deep-32b",
         "marin-community/marin-8b-instruct",
-        "meta-llama/Llama-3-70b-chat-hf",
+        "meta-llama-llama-2-70b-hf",
+        "meta-llama/Llama-2-70b-hf",
         "meta-llama/Llama-3-8b-chat-hf",
         "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
         "meta-llama/Llama-3.2-3B-Instruct-Turbo",
@@ -265,14 +258,8 @@ class TogetherAI(OpenAICompatibleProvider):
         "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "perplexity-ai/r1-1776",
-        "roberizk@gmail.com/meta-llama/Llama-3-70b-chat-hf-26ee936b",
-        "roberizk@gmail.com/meta-llama/Meta-Llama-3-70B-Instruct-6feb41f7",
-        "roberizk@gmail.com/meta-llama/Meta-Llama-3-8B-Instruct-8ced8839",
         "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
-        "scb10x/scb10x-llama3-1-typhoon2-8b-instruct",
-        "togethercomputer/MoA-1",
-        "togethercomputer/MoA-1-Turbo",
-        "togethercomputer/Refuel-Llm-V2",
+        "scb10x/scb10x-typhoon-2-1-gemma3-12b",
         "togethercomputer/Refuel-Llm-V2-Small",
     ]
 
@@ -43,7 +43,9 @@ from .PI import * # Add PI.ai provider
 from .TogetherAI import * # Add TogetherAI provider
 from .xenai import * # Add XenAI provider
 from .GeminiProxy import * # Add GeminiProxy provider
-
+from .friendli import *
+from .monochat import *
+from .MiniMax import * # Add MiniMaxAI provider
 # Export auto-proxy functionality
 from .autoproxy import (
     get_auto_proxy,
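With these re-exports in place, the new clients resolve directly from the OPENAI package. A minimal sketch, assuming webscout 8.3.3 is installed; the model name comes from AVAILABLE_MODELS in the MiniMax diff above:

    from webscout.Provider.OPENAI import MiniMax

    client = MiniMax()
    print(client.models.list())  # ['MiniMax-Reasoning-01']

    response = client.chat.completions.create(
        model="MiniMax-Reasoning-01",
        messages=[{"role": "user", "content": "Hello"}],
        stream=False,
    )
    print(response.choices[0].message.content)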