webscout 8.3.2__py3-none-any.whl → 8.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic; see the registry advisory for details.
- webscout/AIutel.py +146 -37
- webscout/Bing_search.py +1 -2
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/ExaChat.py +84 -58
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +34 -51
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +30 -29
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +3 -1
- webscout/Provider/OPENAI/autoproxy.py +752 -17
- webscout/Provider/OPENAI/base.py +7 -76
- webscout/Provider/OPENAI/deepinfra.py +42 -108
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/typegpt.py +1 -1
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +78 -70
- webscout/Provider/TogetherAI.py +32 -48
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +159 -96
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +41 -46
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +33 -17
- webscout/Provider/yep.py +79 -96
- webscout/auth/__init__.py +12 -1
- webscout/auth/providers.py +27 -5
- webscout/auth/routes.py +128 -104
- webscout/auth/server.py +367 -312
- webscout/client.py +121 -116
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/version.py +1 -1
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/METADATA +102 -90
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/RECORD +75 -87
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- webscout/auth/static/favicon.svg +0 -11
- webscout/auth/swagger_ui.py +0 -203
- webscout/auth/templates/components/authentication.html +0 -237
- webscout/auth/templates/components/base.html +0 -103
- webscout/auth/templates/components/endpoints.html +0 -750
- webscout/auth/templates/components/examples.html +0 -491
- webscout/auth/templates/components/footer.html +0 -75
- webscout/auth/templates/components/header.html +0 -27
- webscout/auth/templates/components/models.html +0 -286
- webscout/auth/templates/components/navigation.html +0 -70
- webscout/auth/templates/static/api.js +0 -455
- webscout/auth/templates/static/icons.js +0 -168
- webscout/auth/templates/static/main.js +0 -784
- webscout/auth/templates/static/particles.js +0 -201
- webscout/auth/templates/static/styles.css +0 -3353
- webscout/auth/templates/static/ui.js +0 -374
- webscout/auth/templates/swagger_ui.html +0 -170
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,329 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import uuid
|
|
3
|
+
import requests
|
|
4
|
+
import json
|
|
5
|
+
import re
|
|
6
|
+
from typing import List, Dict, Optional, Union, Generator, Any
|
|
7
|
+
|
|
8
|
+
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
9
|
+
from webscout.Provider.OPENAI.utils import (
|
|
10
|
+
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
11
|
+
ChatCompletionMessage, CompletionUsage, count_tokens
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
from webscout.litagent import LitAgent
|
|
15
|
+
|
|
16
|
+
# --- MonoChat Client ---
|
|
17
|
+
|
|
18
|
+
class Completions(BaseCompletions):
    """Implements ``chat.completions.create`` against the MonoChat HTTP endpoint."""

    def __init__(self, client: 'MonoChat'):
        # Back-reference to the owning client for its session, headers and endpoint.
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2049,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create

        Args:
            model: Model identifier forwarded verbatim in the JSON payload.
            messages: Chat history as [{"role": ..., "content": ...}, ...].
            max_tokens: Sent only when a positive value is given.
            stream: When True, return a generator of ChatCompletionChunk;
                otherwise a single ChatCompletion.
            temperature: Optional sampling temperature, sent only when set.
            top_p: Optional nucleus-sampling value, sent only when set.
            timeout: Per-request timeout; falls back to the client default.
            proxies: Per-request proxies; falls back to the client's, if any.
            **kwargs: Extra fields merged into the payload as-is (may override
                earlier keys, including "messages"/"model").

        Returns:
            ChatCompletion, or a generator of ChatCompletionChunk when streaming.
        """
        # Prepare the payload for MonoChat API
        payload = {
            "messages": messages,
            "model": model
        }
        if max_tokens is not None and max_tokens > 0:
            payload["max_tokens"] = max_tokens
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p
        payload.update(kwargs)

        # OpenAI-style request id and creation timestamp attached to every result.
        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
        else:
            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        """
        Yield ChatCompletionChunk objects parsed from the streaming response.

        Only wire lines matching 0:"..." carry text; every text chunk gets a
        usage dict with running token counts, and a final chunk with
        finish_reason="stop" is always emitted after the stream ends.

        Raises:
            IOError: on a non-2xx response or any failure during streaming
                (the original exception is chained).
        """
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )
            if not response.ok:
                raise IOError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            # Prompt size is estimated from the message contents we sent.
            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
            completion_tokens = 0
            total_tokens = 0

            for line in response.iter_lines():
                if line:
                    decoded_line = line.decode('utf-8').strip()
                    # MonoChat returns lines like: 0:"Hello" or 0:"!" etc.
                    # NOTE(review): the non-greedy match stops at the first '"',
                    # so fragments containing an escaped quote may be truncated —
                    # confirm against the actual wire format.
                    match = re.search(r'0:"(.*?)"', decoded_line)
                    if match:
                        content = match.group(1)
                        # Unescape per fragment (non-stream mode unescapes once at the end).
                        content = self._client.format_text(content)
                        completion_tokens += count_tokens(content)
                        total_tokens = prompt_tokens + completion_tokens

                        delta = ChoiceDelta(
                            content=content,
                            role="assistant",
                            tool_calls=None
                        )
                        choice = Choice(
                            index=0,
                            delta=delta,
                            finish_reason=None,
                            logprobs=None
                        )
                        chunk = ChatCompletionChunk(
                            id=request_id,
                            choices=[choice],
                            created=created_time,
                            model=model,
                            system_fingerprint=None
                        )
                        # Running usage is attached to every chunk, not just the last.
                        chunk.usage = {
                            "prompt_tokens": prompt_tokens,
                            "completion_tokens": completion_tokens,
                            "total_tokens": total_tokens,
                            "estimated_cost": None
                        }
                        yield chunk

            # Final chunk with finish_reason="stop"
            delta = ChoiceDelta(
                content=None,
                role=None,
                tool_calls=None
            )
            choice = Choice(
                index=0,
                delta=delta,
                finish_reason="stop",
                logprobs=None
            )
            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                system_fingerprint=None
            )
            chunk.usage = {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": total_tokens,
                "estimated_cost": None
            }
            yield chunk

        except Exception as e:
            print(f"Error during MonoChat stream request: {e}")
            raise IOError(f"MonoChat request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        """
        Issue the same streaming request, aggregate all 0:"..." fragments into
        one string, and return a single ChatCompletion with usage totals.

        Raises:
            IOError: on a non-2xx response or any failure during the request
                (the original exception is chained).
        """
        try:
            # stream=True even here: the endpoint emits line-delimited fragments
            # that are collected below into one reply.
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None)
            )
            if not response.ok:
                raise IOError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            full_text = ""
            for line in response.iter_lines(decode_unicode=True):
                if line:
                    match = re.search(r'0:"(.*?)"', line)
                    if match:
                        content = match.group(1)
                        full_text += content

            # Unescape once over the concatenated text.
            full_text = self._client.format_text(full_text)

            prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
            completion_tokens = count_tokens(full_text)
            total_tokens = prompt_tokens + completion_tokens

            message = ChatCompletionMessage(
                role="assistant",
                content=full_text
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )
            return completion

        except Exception as e:
            print(f"Error during MonoChat non-stream request: {e}")
            raise IOError(f"MonoChat request failed: {e}") from e
|
|
206
|
+
|
|
207
|
+
class Chat(BaseChat):
    """Namespace object exposing ``client.chat.completions``, mirroring the OpenAI SDK layout."""

    def __init__(self, client: 'MonoChat'):
        # Delegate all completion calls to a Completions instance bound to the owning client.
        self.completions = Completions(client)
|
|
210
|
+
|
|
211
|
+
class MonoChat(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for MonoChat API.

    Usage:
        client = MonoChat()
        response = client.chat.completions.create(
            model="gpt-4.1",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """

    # Model identifiers accepted by the MonoChat backend (forwarded verbatim).
    AVAILABLE_MODELS = [
        "deepseek-r1",
        "deepseek-v3",
        "uncensored-r1-32b",
        "o3-pro",
        "o4-mini",
        "o3",
        "gpt-4.5-preview",
        "gpt-4.1",
        "gpt-4.1-mini",
        "gpt-4.1-nano",
        "gpt-4o",
        "gpt-4o-mini",
        "gpt-4o-search-preview",
        "gpt-4o-mini-search-preview",
        "gpt-4-turbo",
    ]

    def __init__(
        self,
        browser: str = "chrome"
    ):
        """
        Initialize the MonoChat client.

        Args:
            browser: Browser to emulate in user agent
        """
        # No default timeout; callers may pass one per request via create(...).
        self.timeout = None
        self.api_endpoint = "https://www.chatwithmono.xyz/api/chat"
        self.session = requests.Session()

        # Generate a consistent browser fingerprint (user agent + accept-language).
        agent = LitAgent()
        self.fingerprint = agent.generate_fingerprint(browser)

        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": self.fingerprint["accept_language"],
            "content-type": "application/json",
            "origin": "https://www.chatwithmono.xyz",
            "referer": "https://www.chatwithmono.xyz/",
            "user-agent": self.fingerprint["user_agent"]
        }

        self.session.headers.update(self.headers)
        self.chat = Chat(self)

    @property
    def models(self):
        """Return an object with a ``list()`` method, mimicking ``client.models.list()``."""
        class _ModelList:
            def list(inner_self):
                return MonoChat.AVAILABLE_MODELS
        return _ModelList()

    def format_text(self, text: str) -> str:
        """
        Decode backslash escape sequences in *text*.

        Fix: attempt a strict JSON string decode of the ORIGINAL text first,
        and only fall back to manual replacements when that fails. The
        previous implementation ran the manual replacements first and then
        tried to JSON-decode the already-unescaped result, which (a) made the
        JSON path fail for any text containing raw newlines or quotes and
        (b) could double-unescape sequences such as ``\\\\n``.

        Args:
            text: Text to format

        Returns:
            Formatted text
        """
        try:
            # JSON string decoding handles every standard escape in one pass.
            return json.loads(f'"{text}"')
        except (json.JSONDecodeError, ValueError):
            pass
        try:
            # Fallback: best-effort manual unescaping for non-JSON-safe input.
            text = text.replace('\\\\', '\\')
            text = text.replace('\\n', '\n')
            text = text.replace('\\r', '\r')
            text = text.replace('\\t', '\t')
            text = text.replace('\\"', '"')
            text = text.replace("\\'", "'")
            return text
        except Exception as e:
            print(f"Warning: Error formatting text: {e}")
            return text

    def convert_model_name(self, model: str) -> str:
        """
        Convert model names to ones supported by MonoChat.

        No mapping is currently applied; the name is passed through verbatim
        (kept for interface compatibility with other providers).

        Args:
            model: Model name to convert

        Returns:
            MonoChat model name
        """
        return model
|
|
318
|
+
|
|
319
|
+
if __name__ == "__main__":
    # Manual smoke test: stream a reply and print text deltas as they arrive.
    demo = MonoChat()
    stream = demo.chat.completions.create(
        model="gpt-4.1",
        messages=[{"role": "user", "content": "tell me about humans"}],
        max_tokens=1000,
        stream=True
    )
    for part in stream:
        if not part.choices:
            continue
        head = part.choices[0]
        if not hasattr(head, "delta"):
            continue
        text = getattr(head.delta, "content", None)
        if text:
            print(text, end="", flush=True)
|
|
@@ -1,172 +1 @@
|
|
|
1
|
-
from pydantic import
|
|
2
|
-
# dataclasses
|
|
3
|
-
dataclasses,
|
|
4
|
-
# functional validators
|
|
5
|
-
field_validator,
|
|
6
|
-
model_validator,
|
|
7
|
-
AfterValidator,
|
|
8
|
-
BeforeValidator,
|
|
9
|
-
PlainValidator,
|
|
10
|
-
WrapValidator,
|
|
11
|
-
SkipValidation,
|
|
12
|
-
InstanceOf,
|
|
13
|
-
ModelWrapValidatorHandler,
|
|
14
|
-
# JSON Schema
|
|
15
|
-
WithJsonSchema,
|
|
16
|
-
# deprecated V1 functional validators
|
|
17
|
-
root_validator,
|
|
18
|
-
validator,
|
|
19
|
-
# functional serializers
|
|
20
|
-
field_serializer,
|
|
21
|
-
model_serializer,
|
|
22
|
-
PlainSerializer,
|
|
23
|
-
SerializeAsAny,
|
|
24
|
-
WrapSerializer,
|
|
25
|
-
# config
|
|
26
|
-
ConfigDict,
|
|
27
|
-
with_config,
|
|
28
|
-
# deprecated V1 config
|
|
29
|
-
BaseConfig,
|
|
30
|
-
Extra,
|
|
31
|
-
# validate_call
|
|
32
|
-
validate_call,
|
|
33
|
-
# errors
|
|
34
|
-
PydanticErrorCodes,
|
|
35
|
-
PydanticUserError,
|
|
36
|
-
PydanticSchemaGenerationError,
|
|
37
|
-
PydanticImportError,
|
|
38
|
-
PydanticUndefinedAnnotation,
|
|
39
|
-
PydanticInvalidForJsonSchema,
|
|
40
|
-
PydanticForbiddenQualifier,
|
|
41
|
-
# fields
|
|
42
|
-
Field,
|
|
43
|
-
computed_field,
|
|
44
|
-
PrivateAttr,
|
|
45
|
-
# alias
|
|
46
|
-
AliasChoices,
|
|
47
|
-
AliasGenerator,
|
|
48
|
-
AliasPath,
|
|
49
|
-
# main
|
|
50
|
-
BaseModel,
|
|
51
|
-
create_model,
|
|
52
|
-
# network
|
|
53
|
-
AnyUrl,
|
|
54
|
-
AnyHttpUrl,
|
|
55
|
-
FileUrl,
|
|
56
|
-
HttpUrl,
|
|
57
|
-
FtpUrl,
|
|
58
|
-
WebsocketUrl,
|
|
59
|
-
AnyWebsocketUrl,
|
|
60
|
-
UrlConstraints,
|
|
61
|
-
EmailStr,
|
|
62
|
-
NameEmail,
|
|
63
|
-
IPvAnyAddress,
|
|
64
|
-
IPvAnyInterface,
|
|
65
|
-
IPvAnyNetwork,
|
|
66
|
-
PostgresDsn,
|
|
67
|
-
CockroachDsn,
|
|
68
|
-
AmqpDsn,
|
|
69
|
-
RedisDsn,
|
|
70
|
-
MongoDsn,
|
|
71
|
-
KafkaDsn,
|
|
72
|
-
NatsDsn,
|
|
73
|
-
MySQLDsn,
|
|
74
|
-
MariaDBDsn,
|
|
75
|
-
ClickHouseDsn,
|
|
76
|
-
SnowflakeDsn,
|
|
77
|
-
validate_email,
|
|
78
|
-
# root_model
|
|
79
|
-
RootModel,
|
|
80
|
-
# deprecated tools
|
|
81
|
-
parse_obj_as,
|
|
82
|
-
schema_of,
|
|
83
|
-
schema_json_of,
|
|
84
|
-
# types
|
|
85
|
-
Strict,
|
|
86
|
-
StrictStr,
|
|
87
|
-
conbytes,
|
|
88
|
-
conlist,
|
|
89
|
-
conset,
|
|
90
|
-
confrozenset,
|
|
91
|
-
constr,
|
|
92
|
-
StringConstraints,
|
|
93
|
-
ImportString,
|
|
94
|
-
conint,
|
|
95
|
-
PositiveInt,
|
|
96
|
-
NegativeInt,
|
|
97
|
-
NonNegativeInt,
|
|
98
|
-
NonPositiveInt,
|
|
99
|
-
confloat,
|
|
100
|
-
PositiveFloat,
|
|
101
|
-
NegativeFloat,
|
|
102
|
-
NonNegativeFloat,
|
|
103
|
-
NonPositiveFloat,
|
|
104
|
-
FiniteFloat,
|
|
105
|
-
condecimal,
|
|
106
|
-
condate,
|
|
107
|
-
UUID1,
|
|
108
|
-
UUID3,
|
|
109
|
-
UUID4,
|
|
110
|
-
UUID5,
|
|
111
|
-
UUID6,
|
|
112
|
-
UUID7,
|
|
113
|
-
UUID8,
|
|
114
|
-
FilePath,
|
|
115
|
-
DirectoryPath,
|
|
116
|
-
NewPath,
|
|
117
|
-
Json,
|
|
118
|
-
Secret,
|
|
119
|
-
SecretStr,
|
|
120
|
-
SecretBytes,
|
|
121
|
-
SocketPath,
|
|
122
|
-
StrictBool,
|
|
123
|
-
StrictBytes,
|
|
124
|
-
StrictInt,
|
|
125
|
-
StrictFloat,
|
|
126
|
-
PaymentCardNumber,
|
|
127
|
-
ByteSize,
|
|
128
|
-
PastDate,
|
|
129
|
-
FutureDate,
|
|
130
|
-
PastDatetime,
|
|
131
|
-
FutureDatetime,
|
|
132
|
-
AwareDatetime,
|
|
133
|
-
NaiveDatetime,
|
|
134
|
-
AllowInfNan,
|
|
135
|
-
EncoderProtocol,
|
|
136
|
-
EncodedBytes,
|
|
137
|
-
EncodedStr,
|
|
138
|
-
Base64Encoder,
|
|
139
|
-
Base64Bytes,
|
|
140
|
-
Base64Str,
|
|
141
|
-
Base64UrlBytes,
|
|
142
|
-
Base64UrlStr,
|
|
143
|
-
GetPydanticSchema,
|
|
144
|
-
Tag,
|
|
145
|
-
Discriminator,
|
|
146
|
-
JsonValue,
|
|
147
|
-
FailFast,
|
|
148
|
-
# type_adapter
|
|
149
|
-
TypeAdapter,
|
|
150
|
-
# version
|
|
151
|
-
__version__,
|
|
152
|
-
VERSION,
|
|
153
|
-
# warnings
|
|
154
|
-
PydanticDeprecatedSince20,
|
|
155
|
-
PydanticDeprecatedSince26,
|
|
156
|
-
PydanticDeprecatedSince29,
|
|
157
|
-
PydanticDeprecatedSince210,
|
|
158
|
-
PydanticDeprecatedSince211,
|
|
159
|
-
PydanticDeprecationWarning,
|
|
160
|
-
PydanticExperimentalWarning,
|
|
161
|
-
# annotated handlers
|
|
162
|
-
GetCoreSchemaHandler,
|
|
163
|
-
GetJsonSchemaHandler,
|
|
164
|
-
# pydantic_core
|
|
165
|
-
ValidationError,
|
|
166
|
-
ValidationInfo,
|
|
167
|
-
SerializationInfo,
|
|
168
|
-
ValidatorFunctionWrapHandler,
|
|
169
|
-
FieldSerializationInfo,
|
|
170
|
-
SerializerFunctionWrapHandler,
|
|
171
|
-
OnErrorOmit,
|
|
172
|
-
)
|
|
1
|
+
# NOTE(review): deliberate wildcard re-export — this shim module exists only to
# mirror pydantic's public surface, replacing the previous hand-maintained
# explicit import list. Names available here track the installed pydantic
# version's `__all__`; do not `import *` from this module elsewhere in code.
from pydantic import *
|
|
@@ -1,10 +1,9 @@
|
|
|
1
|
-
from typing import List, Dict, Optional, Any
|
|
1
|
+
from typing import List, Dict, Optional, Any
|
|
2
2
|
from enum import Enum
|
|
3
3
|
import time
|
|
4
4
|
import uuid
|
|
5
5
|
from webscout.Provider.OPENAI.pydantic_imports import (
|
|
6
|
-
BaseModel, Field,
|
|
7
|
-
StrictStr, StrictInt, StrictFloat, StrictBool
|
|
6
|
+
BaseModel, Field, StrictStr, StrictInt
|
|
8
7
|
)
|
|
9
8
|
|
|
10
9
|
# --- OpenAI Response Structure Mimics ---
|
|
@@ -270,49 +269,27 @@ def get_last_user_message(messages: List[Dict[str, Any]]) -> str:
|
|
|
270
269
|
|
|
271
270
|
def count_tokens(text_or_messages: Any) -> int:
    """
    Count tokens in a string or a list of messages using tiktoken.

    Degrades gracefully: when the optional ``tiktoken`` dependency is not
    installed this returns 0 instead of raising ImportError (restoring the
    pre-8.3.3 fallback behavior, where token counting was best-effort).

    Args:
        text_or_messages: A string or a list of messages (string or any type).

    Returns:
        int: Number of tokens (0 for unsupported input types or when
        tiktoken is unavailable).
    """
    try:
        import tiktoken  # optional dependency; imported lazily on first use
    except ImportError:
        # Token counts are advisory (usage reporting only) — don't crash callers.
        return 0

    if isinstance(text_or_messages, str):
        enc = tiktoken.encoding_for_model("gpt-4o")
        return len(enc.encode(text_or_messages))
    if isinstance(text_or_messages, list):
        # Hoist the encoder lookup out of the per-message work; non-string
        # entries are stringified before encoding, as before.
        enc = tiktoken.encoding_for_model("gpt-4o")
        return sum(
            len(enc.encode(m if isinstance(m, str) else str(m)))
            for m in text_or_messages
        )
    return 0
|
|
295
|
+
|
|
@@ -6,8 +6,8 @@ import json
|
|
|
6
6
|
from typing import List, Dict, Optional, Union, Generator, Any
|
|
7
7
|
|
|
8
8
|
# Import base classes and utility structures
|
|
9
|
-
from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
10
|
-
from .utils import (
|
|
9
|
+
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
10
|
+
from webscout.Provider.OPENAI.utils import (
|
|
11
11
|
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
12
12
|
ChatCompletionMessage, CompletionUsage, count_tokens
|
|
13
13
|
)
|
|
@@ -365,3 +365,15 @@ class X0GPT(OpenAICompatibleProvider):
|
|
|
365
365
|
"""
|
|
366
366
|
# X0GPT doesn't actually use model names, but we'll keep this for compatibility
|
|
367
367
|
return model
|
|
368
|
+
|
|
369
|
+
if __name__ == "__main__":
    # Manual smoke test: stream a completion and dump the raw chunks.
    from rich import print

    demo_client = X0GPT()
    stream = demo_client.chat.completions.create(
        model="X0GPT",
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True
    )
    for piece in stream:
        print(piece, end='', flush=True)
|