webscout 8.3.2__py3-none-any.whl → 8.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIutel.py +367 -41
- webscout/Bard.py +2 -22
- webscout/Bing_search.py +1 -2
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/scira_search.py +24 -11
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/Deepinfra.py +75 -57
- webscout/Provider/ExaChat.py +93 -63
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/FreeGemini.py +2 -2
- webscout/Provider/Gemini.py +3 -10
- webscout/Provider/GeminiProxy.py +31 -5
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/LambdaChat.py +39 -31
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +39 -59
- webscout/Provider/OLLAMA.py +8 -9
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +31 -30
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +4 -2
- webscout/Provider/OPENAI/autoproxy.py +753 -18
- webscout/Provider/OPENAI/base.py +7 -76
- webscout/Provider/OPENAI/copilot.py +73 -26
- webscout/Provider/OPENAI/deepinfra.py +96 -132
- webscout/Provider/OPENAI/exachat.py +9 -5
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/netwrck.py +4 -7
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/qodo.py +630 -0
- webscout/Provider/OPENAI/scirachat.py +82 -49
- webscout/Provider/OPENAI/textpollinations.py +13 -12
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/typegpt.py +4 -4
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/Qodo.py +454 -0
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +91 -82
- webscout/Provider/TogetherAI.py +32 -48
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/__init__.py +6 -6
- webscout/Provider/copilot.py +58 -61
- webscout/Provider/freeaichat.py +64 -55
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/monochat.py +275 -0
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +257 -104
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +43 -48
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +325 -299
- webscout/Provider/yep.py +79 -96
- webscout/__init__.py +7 -2
- webscout/auth/__init__.py +12 -1
- webscout/auth/providers.py +27 -5
- webscout/auth/routes.py +146 -105
- webscout/auth/server.py +367 -312
- webscout/client.py +121 -116
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/version.py +1 -1
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/METADATA +102 -91
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/RECORD +95 -107
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/OPENAI/freeaichat.py +0 -363
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- webscout/auth/static/favicon.svg +0 -11
- webscout/auth/swagger_ui.py +0 -203
- webscout/auth/templates/components/authentication.html +0 -237
- webscout/auth/templates/components/base.html +0 -103
- webscout/auth/templates/components/endpoints.html +0 -750
- webscout/auth/templates/components/examples.html +0 -491
- webscout/auth/templates/components/footer.html +0 -75
- webscout/auth/templates/components/header.html +0 -27
- webscout/auth/templates/components/models.html +0 -286
- webscout/auth/templates/components/navigation.html +0 -70
- webscout/auth/templates/static/api.js +0 -455
- webscout/auth/templates/static/icons.js +0 -168
- webscout/auth/templates/static/main.js +0 -784
- webscout/auth/templates/static/particles.js +0 -201
- webscout/auth/templates/static/styles.css +0 -3353
- webscout/auth/templates/static/ui.js +0 -374
- webscout/auth/templates/swagger_ui.html +0 -170
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/WHEEL +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.2.dist-info → webscout-8.3.4.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,329 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import uuid
|
|
3
|
+
import requests
|
|
4
|
+
import json
|
|
5
|
+
import re
|
|
6
|
+
from typing import List, Dict, Optional, Union, Generator, Any
|
|
7
|
+
|
|
8
|
+
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
9
|
+
from webscout.Provider.OPENAI.utils import (
|
|
10
|
+
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
11
|
+
ChatCompletionMessage, CompletionUsage, count_tokens
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
from webscout.litagent import LitAgent
|
|
15
|
+
|
|
16
|
+
# --- MonoChat Client ---
|
|
17
|
+
|
|
18
|
+
class Completions(BaseCompletions):
    """
    Chat-completions interface for the MonoChat provider (OpenAI-compatible).

    MonoChat answers with Vercel-AI-SDK style stream lines of the form
    0:"<json-escaped text>"; both the streaming and non-streaming paths
    extract those payloads and repackage them as OpenAI-style objects.
    """

    # A JSON-escaped payload may contain \" -- match escaped pairs explicitly
    # so an escaped quote does not terminate the capture. The previous pattern
    # r'0:"(.*?)"' stopped at the first '"' even when it was escaped, which
    # truncated content such as: 0:"He said \"hi\"".
    _CONTENT_RE = re.compile(r'0:"((?:\\.|[^"\\])*)"')

    def __init__(self, client: 'MonoChat'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 2049,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation.
        Mimics openai.chat.completions.create.

        Args:
            model: MonoChat model identifier (see MonoChat.AVAILABLE_MODELS).
            messages: OpenAI-style [{"role": ..., "content": ...}] list.
            max_tokens: Upper bound on generated tokens; omitted when <= 0.
            stream: When True, returns a generator of ChatCompletionChunk.
            temperature: Optional sampling temperature, forwarded verbatim.
            top_p: Optional nucleus-sampling value, forwarded verbatim.
            timeout: Per-request timeout in seconds (falls back to client's).
            proxies: Optional requests-style proxy mapping.
            **kwargs: Extra fields merged verbatim into the request payload.

        Returns:
            ChatCompletion, or a generator of ChatCompletionChunk when
            stream=True.

        Raises:
            IOError: If the HTTP request fails or returns a non-OK status.
        """
        # Prepare the payload for the MonoChat API.
        payload: Dict[str, Any] = {
            "messages": messages,
            "model": model
        }
        if max_tokens is not None and max_tokens > 0:
            payload["max_tokens"] = max_tokens
        if temperature is not None:
            payload["temperature"] = temperature
        if top_p is not None:
            payload["top_p"] = top_p
        payload.update(kwargs)

        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
        return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

    def _post(
        self, payload: Dict[str, Any], timeout: Optional[int], proxies: Optional[Dict[str, str]]
    ):
        """POST the payload to the MonoChat endpoint; raise IOError on a bad status."""
        response = self._client.session.post(
            self._client.api_endpoint,
            headers=self._client.headers,
            json=payload,
            stream=True,
            timeout=timeout or self._client.timeout,
            proxies=proxies or getattr(self._client, "proxies", None)
        )
        if not response.ok:
            raise IOError(
                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
            )
        return response

    @staticmethod
    def _prompt_token_count(payload: Dict[str, Any]) -> int:
        """Token count over all message contents in the outgoing payload."""
        return count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])

    def _make_chunk(
        self, request_id: str, created_time: int, model: str,
        content: Optional[str], finish_reason: Optional[str],
        usage: Dict[str, Any]
    ) -> ChatCompletionChunk:
        """Assemble one OpenAI-style streaming chunk with attached usage dict."""
        delta = ChoiceDelta(
            content=content,
            # The terminal chunk (content=None) carries no role, matching the
            # OpenAI streaming convention.
            role="assistant" if content is not None else None,
            tool_calls=None
        )
        choice = Choice(
            index=0,
            delta=delta,
            finish_reason=finish_reason,
            logprobs=None
        )
        chunk = ChatCompletionChunk(
            id=request_id,
            choices=[choice],
            created=created_time,
            model=model,
            system_fingerprint=None
        )
        chunk.usage = usage
        return chunk

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ChatCompletionChunk objects as MonoChat streams its answer."""
        try:
            response = self._post(payload, timeout, proxies)

            prompt_tokens = self._prompt_token_count(payload)
            completion_tokens = 0
            total_tokens = prompt_tokens

            for line in response.iter_lines():
                if not line:
                    continue
                decoded_line = line.decode('utf-8').strip()
                # MonoChat returns lines like: 0:"Hello" or 0:"!" etc.
                match = self._CONTENT_RE.search(decoded_line)
                if not match:
                    continue
                content = self._client.format_text(match.group(1))
                completion_tokens += count_tokens(content)
                total_tokens = prompt_tokens + completion_tokens
                yield self._make_chunk(
                    request_id, created_time, model, content, None,
                    {
                        "prompt_tokens": prompt_tokens,
                        "completion_tokens": completion_tokens,
                        "total_tokens": total_tokens,
                        "estimated_cost": None
                    }
                )

            # Final chunk with finish_reason="stop" and the final usage totals.
            yield self._make_chunk(
                request_id, created_time, model, None, "stop",
                {
                    "prompt_tokens": prompt_tokens,
                    "completion_tokens": completion_tokens,
                    "total_tokens": total_tokens,
                    "estimated_cost": None
                }
            )

        except Exception as e:
            print(f"Error during MonoChat stream request: {e}")
            raise IOError(f"MonoChat request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        """Collect the whole streamed answer and return a single ChatCompletion."""
        try:
            response = self._post(payload, timeout, proxies)

            # Join the raw escaped fragments first and unescape once at the
            # end, so an escape sequence split across fragments still decodes
            # correctly.
            pieces: List[str] = []
            for line in response.iter_lines(decode_unicode=True):
                if not line:
                    continue
                match = self._CONTENT_RE.search(line)
                if match:
                    pieces.append(match.group(1))
            full_text = self._client.format_text("".join(pieces))

            prompt_tokens = self._prompt_token_count(payload)
            completion_tokens = count_tokens(full_text)
            total_tokens = prompt_tokens + completion_tokens

            message = ChatCompletionMessage(
                role="assistant",
                content=full_text
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )
            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

        except Exception as e:
            print(f"Error during MonoChat non-stream request: {e}")
            raise IOError(f"MonoChat request failed: {e}") from e
|
|
206
|
+
|
|
207
|
+
class Chat(BaseChat):
    """Namespace object exposing `client.chat.completions`, mirroring the
    openai-python client layout."""

    def __init__(self, client: 'MonoChat'):
        # Single sub-resource: the completions endpoint bound to this client.
        self.completions = Completions(client)
|
|
210
|
+
|
|
211
|
+
class MonoChat(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for MonoChat API.

    Usage:
        client = MonoChat()
        response = client.chat.completions.create(
            model="gpt-4.1",
            messages=[{"role": "user", "content": "Hello!"}]
        )
    """

    AVAILABLE_MODELS = [
        "deepseek-r1",
        "deepseek-v3",
        "uncensored-r1-32b",
        "o3-pro",
        "o4-mini",
        "o3",
        "gpt-4.5-preview",
        "gpt-4.1",
        "gpt-4.1-mini",
        "gpt-4.1-nano",
        "gpt-4o",
        "gpt-4o-mini",
        "gpt-4o-search-preview",
        "gpt-4o-mini-search-preview",
        "gpt-4-turbo",
    ]

    def __init__(
        self,
        browser: str = "chrome"
    ):
        """
        Initialize the MonoChat client.

        Args:
            browser: Browser to emulate in user agent.
        """
        # No default timeout: requests will wait indefinitely unless a
        # per-call timeout is supplied to chat.completions.create().
        self.timeout: Optional[int] = None
        self.api_endpoint = "https://gg.is-a-furry.dev/api/chat"
        self.session = requests.Session()

        # Generate a browser fingerprint (user agent + accept-language).
        agent = LitAgent()
        self.fingerprint = agent.generate_fingerprint(browser)

        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": self.fingerprint["accept_language"],
            "content-type": "application/json",
            "origin": "https://gg.is-a-furry.dev",
            "referer": "https://gg.is-a-furry.dev/",
            "user-agent": self.fingerprint["user_agent"]
        }

        self.session.headers.update(self.headers)
        self.chat = Chat(self)

    @property
    def models(self):
        """Minimal OpenAI-style model registry: `.models.list()` -> list of names."""
        class _ModelList:
            def list(inner_self):
                return MonoChat.AVAILABLE_MODELS
        return _ModelList()

    def format_text(self, text: str) -> str:
        """
        Decode a JSON-escaped stream fragment into plain text.

        MonoChat stream payloads are JSON string bodies, so the authoritative
        decoding is `json.loads` on the re-quoted fragment. The manual
        replacements run only as a fallback when the fragment is not valid
        JSON (e.g. a chunk boundary split an escape sequence).

        Note: the original implementation did the manual replacements *before*
        trying JSON, which corrupted inputs such as '\\\\n' (a literal
        backslash followed by 'n') into a newline. JSON-first fixes that.

        Args:
            text: Raw escaped fragment extracted from a 0:"..." stream line.

        Returns:
            The decoded text; on any failure the input is returned unchanged.
        """
        try:
            return json.loads(f'"{text}"')
        except (json.JSONDecodeError, ValueError):
            pass
        try:
            # Best-effort fallback, mirroring the common escape sequences.
            text = text.replace('\\\\', '\\')
            text = text.replace('\\n', '\n')
            text = text.replace('\\r', '\r')
            text = text.replace('\\t', '\t')
            text = text.replace('\\"', '"')
            text = text.replace("\\'", "'")
            return text
        except Exception as e:
            print(f"Warning: Error formatting text: {e}")
            return text

    def convert_model_name(self, model: str) -> str:
        """
        Convert model names to ones supported by MonoChat.

        MonoChat accepts the supplied identifier verbatim, so this is an
        identity mapping kept for interface parity with other providers.

        Args:
            model: Model name to convert

        Returns:
            MonoChat model name
        """
        return model
|
|
318
|
+
|
|
319
|
+
if __name__ == "__main__":
    # Manual smoke test: stream a short answer and print it as it arrives.
    mono = MonoChat()
    stream = mono.chat.completions.create(
        model="gpt-4.1",
        messages=[{"role": "user", "content": "tell me about humans"}],
        max_tokens=1000,
        stream=True,
    )
    for part in stream:
        if not part.choices:
            continue
        delta = getattr(part.choices[0], "delta", None)
        text = getattr(delta, "content", None) if delta is not None else None
        if text:
            print(text, end="", flush=True)
|
|
@@ -204,18 +204,15 @@ class Netwrck(OpenAICompatibleProvider):
|
|
|
204
204
|
"""
|
|
205
205
|
|
|
206
206
|
AVAILABLE_MODELS = [
|
|
207
|
-
"
|
|
208
|
-
"x-ai/grok-2",
|
|
209
|
-
"anthropic/claude-3-7-sonnet-20250219",
|
|
207
|
+
"thedrummer/valkyrie-49b-v1",
|
|
210
208
|
"sao10k/l3-euryale-70b",
|
|
209
|
+
"deepseek/deepseek-chat",
|
|
210
|
+
"deepseek/deepseek-r1",
|
|
211
|
+
"anthropic/claude-sonnet-4-20250514",
|
|
211
212
|
"openai/gpt-4.1-mini",
|
|
212
213
|
"gryphe/mythomax-l2-13b",
|
|
213
|
-
"google/gemini-pro-1.5",
|
|
214
214
|
"google/gemini-2.5-flash-preview-04-17",
|
|
215
215
|
"nvidia/llama-3.1-nemotron-70b-instruct",
|
|
216
|
-
"deepseek/deepseek-r1",
|
|
217
|
-
"deepseek/deepseek-chat"
|
|
218
|
-
|
|
219
216
|
]
|
|
220
217
|
|
|
221
218
|
# Default greeting used by Netwrck
|
|
@@ -1,172 +1 @@
|
|
|
1
|
-
from pydantic import
|
|
2
|
-
# dataclasses
|
|
3
|
-
dataclasses,
|
|
4
|
-
# functional validators
|
|
5
|
-
field_validator,
|
|
6
|
-
model_validator,
|
|
7
|
-
AfterValidator,
|
|
8
|
-
BeforeValidator,
|
|
9
|
-
PlainValidator,
|
|
10
|
-
WrapValidator,
|
|
11
|
-
SkipValidation,
|
|
12
|
-
InstanceOf,
|
|
13
|
-
ModelWrapValidatorHandler,
|
|
14
|
-
# JSON Schema
|
|
15
|
-
WithJsonSchema,
|
|
16
|
-
# deprecated V1 functional validators
|
|
17
|
-
root_validator,
|
|
18
|
-
validator,
|
|
19
|
-
# functional serializers
|
|
20
|
-
field_serializer,
|
|
21
|
-
model_serializer,
|
|
22
|
-
PlainSerializer,
|
|
23
|
-
SerializeAsAny,
|
|
24
|
-
WrapSerializer,
|
|
25
|
-
# config
|
|
26
|
-
ConfigDict,
|
|
27
|
-
with_config,
|
|
28
|
-
# deprecated V1 config
|
|
29
|
-
BaseConfig,
|
|
30
|
-
Extra,
|
|
31
|
-
# validate_call
|
|
32
|
-
validate_call,
|
|
33
|
-
# errors
|
|
34
|
-
PydanticErrorCodes,
|
|
35
|
-
PydanticUserError,
|
|
36
|
-
PydanticSchemaGenerationError,
|
|
37
|
-
PydanticImportError,
|
|
38
|
-
PydanticUndefinedAnnotation,
|
|
39
|
-
PydanticInvalidForJsonSchema,
|
|
40
|
-
PydanticForbiddenQualifier,
|
|
41
|
-
# fields
|
|
42
|
-
Field,
|
|
43
|
-
computed_field,
|
|
44
|
-
PrivateAttr,
|
|
45
|
-
# alias
|
|
46
|
-
AliasChoices,
|
|
47
|
-
AliasGenerator,
|
|
48
|
-
AliasPath,
|
|
49
|
-
# main
|
|
50
|
-
BaseModel,
|
|
51
|
-
create_model,
|
|
52
|
-
# network
|
|
53
|
-
AnyUrl,
|
|
54
|
-
AnyHttpUrl,
|
|
55
|
-
FileUrl,
|
|
56
|
-
HttpUrl,
|
|
57
|
-
FtpUrl,
|
|
58
|
-
WebsocketUrl,
|
|
59
|
-
AnyWebsocketUrl,
|
|
60
|
-
UrlConstraints,
|
|
61
|
-
EmailStr,
|
|
62
|
-
NameEmail,
|
|
63
|
-
IPvAnyAddress,
|
|
64
|
-
IPvAnyInterface,
|
|
65
|
-
IPvAnyNetwork,
|
|
66
|
-
PostgresDsn,
|
|
67
|
-
CockroachDsn,
|
|
68
|
-
AmqpDsn,
|
|
69
|
-
RedisDsn,
|
|
70
|
-
MongoDsn,
|
|
71
|
-
KafkaDsn,
|
|
72
|
-
NatsDsn,
|
|
73
|
-
MySQLDsn,
|
|
74
|
-
MariaDBDsn,
|
|
75
|
-
ClickHouseDsn,
|
|
76
|
-
SnowflakeDsn,
|
|
77
|
-
validate_email,
|
|
78
|
-
# root_model
|
|
79
|
-
RootModel,
|
|
80
|
-
# deprecated tools
|
|
81
|
-
parse_obj_as,
|
|
82
|
-
schema_of,
|
|
83
|
-
schema_json_of,
|
|
84
|
-
# types
|
|
85
|
-
Strict,
|
|
86
|
-
StrictStr,
|
|
87
|
-
conbytes,
|
|
88
|
-
conlist,
|
|
89
|
-
conset,
|
|
90
|
-
confrozenset,
|
|
91
|
-
constr,
|
|
92
|
-
StringConstraints,
|
|
93
|
-
ImportString,
|
|
94
|
-
conint,
|
|
95
|
-
PositiveInt,
|
|
96
|
-
NegativeInt,
|
|
97
|
-
NonNegativeInt,
|
|
98
|
-
NonPositiveInt,
|
|
99
|
-
confloat,
|
|
100
|
-
PositiveFloat,
|
|
101
|
-
NegativeFloat,
|
|
102
|
-
NonNegativeFloat,
|
|
103
|
-
NonPositiveFloat,
|
|
104
|
-
FiniteFloat,
|
|
105
|
-
condecimal,
|
|
106
|
-
condate,
|
|
107
|
-
UUID1,
|
|
108
|
-
UUID3,
|
|
109
|
-
UUID4,
|
|
110
|
-
UUID5,
|
|
111
|
-
UUID6,
|
|
112
|
-
UUID7,
|
|
113
|
-
UUID8,
|
|
114
|
-
FilePath,
|
|
115
|
-
DirectoryPath,
|
|
116
|
-
NewPath,
|
|
117
|
-
Json,
|
|
118
|
-
Secret,
|
|
119
|
-
SecretStr,
|
|
120
|
-
SecretBytes,
|
|
121
|
-
SocketPath,
|
|
122
|
-
StrictBool,
|
|
123
|
-
StrictBytes,
|
|
124
|
-
StrictInt,
|
|
125
|
-
StrictFloat,
|
|
126
|
-
PaymentCardNumber,
|
|
127
|
-
ByteSize,
|
|
128
|
-
PastDate,
|
|
129
|
-
FutureDate,
|
|
130
|
-
PastDatetime,
|
|
131
|
-
FutureDatetime,
|
|
132
|
-
AwareDatetime,
|
|
133
|
-
NaiveDatetime,
|
|
134
|
-
AllowInfNan,
|
|
135
|
-
EncoderProtocol,
|
|
136
|
-
EncodedBytes,
|
|
137
|
-
EncodedStr,
|
|
138
|
-
Base64Encoder,
|
|
139
|
-
Base64Bytes,
|
|
140
|
-
Base64Str,
|
|
141
|
-
Base64UrlBytes,
|
|
142
|
-
Base64UrlStr,
|
|
143
|
-
GetPydanticSchema,
|
|
144
|
-
Tag,
|
|
145
|
-
Discriminator,
|
|
146
|
-
JsonValue,
|
|
147
|
-
FailFast,
|
|
148
|
-
# type_adapter
|
|
149
|
-
TypeAdapter,
|
|
150
|
-
# version
|
|
151
|
-
__version__,
|
|
152
|
-
VERSION,
|
|
153
|
-
# warnings
|
|
154
|
-
PydanticDeprecatedSince20,
|
|
155
|
-
PydanticDeprecatedSince26,
|
|
156
|
-
PydanticDeprecatedSince29,
|
|
157
|
-
PydanticDeprecatedSince210,
|
|
158
|
-
PydanticDeprecatedSince211,
|
|
159
|
-
PydanticDeprecationWarning,
|
|
160
|
-
PydanticExperimentalWarning,
|
|
161
|
-
# annotated handlers
|
|
162
|
-
GetCoreSchemaHandler,
|
|
163
|
-
GetJsonSchemaHandler,
|
|
164
|
-
# pydantic_core
|
|
165
|
-
ValidationError,
|
|
166
|
-
ValidationInfo,
|
|
167
|
-
SerializationInfo,
|
|
168
|
-
ValidatorFunctionWrapHandler,
|
|
169
|
-
FieldSerializationInfo,
|
|
170
|
-
SerializerFunctionWrapHandler,
|
|
171
|
-
OnErrorOmit,
|
|
172
|
-
)
|
|
1
|
+
from pydantic import *
|