pydantic-ai-slim 0.0.54__py3-none-any.whl → 0.0.55__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydantic-ai-slim might be problematic.
- pydantic_ai/agent.py +3 -3
- pydantic_ai/models/__init__.py +1 -1
- pydantic_ai/models/anthropic.py +9 -1
- pydantic_ai/models/groq.py +2 -1
- pydantic_ai/models/mistral.py +5 -0
- pydantic_ai/models/openai.py +3 -0
- pydantic_ai/models/wrapper.py +3 -0
- pydantic_ai/providers/__init__.py +4 -0
- pydantic_ai/providers/azure.py +2 -2
- {pydantic_ai_slim-0.0.54.dist-info → pydantic_ai_slim-0.0.55.dist-info}/METADATA +3 -3
- {pydantic_ai_slim-0.0.54.dist-info → pydantic_ai_slim-0.0.55.dist-info}/RECORD +13 -13
- {pydantic_ai_slim-0.0.54.dist-info → pydantic_ai_slim-0.0.55.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-0.0.54.dist-info → pydantic_ai_slim-0.0.55.dist-info}/entry_points.txt +0 -0
pydantic_ai/agent.py
CHANGED
@@ -592,7 +592,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
     @overload
     def run_stream(
         self,
-        user_prompt: str | Sequence[_messages.UserContent],
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         result_type: None = None,
         message_history: list[_messages.ModelMessage] | None = None,
@@ -607,7 +607,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
     @overload
     def run_stream(
         self,
-        user_prompt: str | Sequence[_messages.UserContent],
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         result_type: type[RunResultDataT],
         message_history: list[_messages.ModelMessage] | None = None,
@@ -622,7 +622,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
     @asynccontextmanager
     async def run_stream(  # noqa C901
         self,
-        user_prompt: str | Sequence[_messages.UserContent],
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         result_type: type[RunResultDataT] | None = None,
         message_history: list[_messages.ModelMessage] | None = None,
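All three run_stream overloads above now default user_prompt to None, which suggests a streamed run can be started from message_history alone. A minimal sketch of that usage, assuming an existing Agent instance and a list of ModelMessage objects from a previous run; the function and variable names are illustrative, not part of the diff:

    # Sketch only: continue a conversation from prior messages without a new prompt.
    async def resume(agent, history):
        async with agent.run_stream(message_history=history) as result:
            async for text in result.stream_text():
                print(text)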
pydantic_ai/models/__init__.py
CHANGED
@@ -427,7 +427,7 @@ def infer_model(model: Model | KnownModelName | str) -> Model:
         from .cohere import CohereModel

         return CohereModel(model_name, provider=provider)
-    elif provider in ('deepseek', 'openai'):
+    elif provider in ('deepseek', 'openai', 'azure'):
         from .openai import OpenAIModel

         return OpenAIModel(model_name, provider=provider)
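With 'azure' added to the OpenAI branch of infer_model (and an 'azure' case added to infer_provider further down), an 'azure'-prefixed model string should now resolve to an OpenAIModel backed by the Azure provider. A sketch, assuming the same 'provider:model' string convention used by the existing 'openai' and 'deepseek' branches; the deployment name is a placeholder:

    from pydantic_ai.models import infer_model

    # Assumes AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY and OPENAI_API_VERSION are
    # set in the environment, per the checks shown in providers/azure.py below.
    model = infer_model('azure:gpt-4o')   # 'gpt-4o' is a hypothetical deployment name
    print(type(model).__name__)           # expected: OpenAIModel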
pydantic_ai/models/anthropic.py
CHANGED
@@ -31,7 +31,14 @@ from ..messages import (
 from ..providers import Provider, infer_provider
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
-from . import Model, ModelRequestParameters, StreamedResponse, cached_async_http_client, check_allow_model_requests
+from . import (
+    Model,
+    ModelRequestParameters,
+    StreamedResponse,
+    cached_async_http_client,
+    check_allow_model_requests,
+    get_user_agent,
+)

 try:
     from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropic, AsyncStream
@@ -231,6 +238,7 @@ class AnthropicModel(Model):
                 top_p=model_settings.get('top_p', NOT_GIVEN),
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 metadata=model_settings.get('anthropic_metadata', NOT_GIVEN),
+                extra_headers={'User-Agent': get_user_agent()},
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
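anthropic.py, groq.py, mistral.py and openai.py all gain the same change in this release: get_user_agent is imported from pydantic_ai.models and attached as a User-Agent header on every outgoing request (extra_headers for the Anthropic, Groq and OpenAI SDKs, http_headers for the Mistral SDK). The diff does not show get_user_agent itself, so the sketch below only illustrates the pattern and the example value is an assumption:

    from pydantic_ai.models import get_user_agent

    # The per-request header each model now sends; the returned string is assumed to
    # identify the pydantic-ai client, e.g. something like 'pydantic-ai/0.0.55'
    # (exact format not shown in this diff).
    headers = {'User-Agent': get_user_agent()}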
pydantic_ai/models/groq.py
CHANGED
@@ -31,7 +31,7 @@ from ..messages import (
 from ..providers import Provider, infer_provider
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
-from . import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests
+from . import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests, get_user_agent

 try:
     from groq import NOT_GIVEN, APIStatusError, AsyncGroq, AsyncStream
@@ -218,6 +218,7 @@ class GroqModel(Model):
                 presence_penalty=model_settings.get('presence_penalty', NOT_GIVEN),
                 frequency_penalty=model_settings.get('frequency_penalty', NOT_GIVEN),
                 logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
+                extra_headers={'User-Agent': get_user_agent()},
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
pydantic_ai/models/mistral.py
CHANGED
@@ -39,6 +39,7 @@ from . import (
     ModelRequestParameters,
     StreamedResponse,
     check_allow_model_requests,
+    get_user_agent,
 )

 try:
@@ -200,6 +201,7 @@ class MistralModel(Model):
                 timeout_ms=self._get_timeout_ms(model_settings.get('timeout')),
                 random_seed=model_settings.get('seed', UNSET),
                 stop=model_settings.get('stop_sequences', None),
+                http_headers={'User-Agent': get_user_agent()},
             )
         except SDKError as e:
             if (status_code := e.status_code) >= 400:
@@ -238,6 +240,7 @@ class MistralModel(Model):
                 presence_penalty=model_settings.get('presence_penalty'),
                 frequency_penalty=model_settings.get('frequency_penalty'),
                 stop=model_settings.get('stop_sequences', None),
+                http_headers={'User-Agent': get_user_agent()},
             )

         elif model_request_parameters.result_tools:
@@ -251,6 +254,7 @@ class MistralModel(Model):
                 messages=mistral_messages,
                 response_format={'type': 'json_object'},
                 stream=True,
+                http_headers={'User-Agent': get_user_agent()},
             )

         else:
@@ -259,6 +263,7 @@ class MistralModel(Model):
                 model=str(self._model_name),
                 messages=mistral_messages,
                 stream=True,
+                http_headers={'User-Agent': get_user_agent()},
             )
         assert response, 'A unexpected empty response from Mistral.'
         return response
pydantic_ai/models/openai.py
CHANGED
@@ -39,6 +39,7 @@ from . import (
     StreamedResponse,
     cached_async_http_client,
     check_allow_model_requests,
+    get_user_agent,
 )

 try:
@@ -282,6 +283,7 @@ class OpenAIModel(Model):
                 logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
                 reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
                 user=model_settings.get('openai_user', NOT_GIVEN),
+                extra_headers={'User-Agent': get_user_agent()},
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
@@ -613,6 +615,7 @@ class OpenAIResponsesModel(Model):
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
+                extra_headers={'User-Agent': get_user_agent()},
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
pydantic_ai/models/wrapper.py
CHANGED
@@ -37,6 +37,9 @@ class WrapperModel(Model):
         async with self.wrapped.request_stream(messages, model_settings, model_request_parameters) as response_stream:
             yield response_stream

+    def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
+        return self.wrapped.customize_request_parameters(model_request_parameters)
+
     @property
     def model_name(self) -> str:
         return self.wrapped.model_name
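WrapperModel now forwards customize_request_parameters to the model it wraps, so a wrapper no longer masks the wrapped model's request-parameter customization. A sketch of a wrapper subclass that relies on this pass-through; the subclass and its logging are illustrative only, not part of the package:

    from pydantic_ai.models.wrapper import WrapperModel

    class LoggingModel(WrapperModel):
        # Illustrative wrapper: log, then delegate everything (including the new
        # customize_request_parameters pass-through) to the wrapped model.
        async def request(self, *args, **kwargs):
            print('forwarding request to', self.wrapped.model_name)
            return await super().request(*args, **kwargs)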
pydantic_ai/providers/__init__.py
CHANGED
@@ -52,6 +52,10 @@ def infer_provider(provider: str) -> Provider[Any]:
         from .deepseek import DeepSeekProvider

         return DeepSeekProvider()
+    elif provider == 'azure':
+        from .azure import AzureProvider
+
+        return AzureProvider()
     elif provider == 'google-vertex':
         from .google_vertex import GoogleVertexProvider

pydantic_ai/providers/azure.py
CHANGED
@@ -87,9 +87,9 @@ class AzureProvider(Provider[AsyncOpenAI]):
                 'Must provide one of the `azure_endpoint` argument or the `AZURE_OPENAI_ENDPOINT` environment variable'
             )

-        if not api_key and '
+        if not api_key and 'AZURE_OPENAI_API_KEY' not in os.environ:  # pragma: no cover
             raise UserError(
-                'Must provide one of the `api_key` argument or the `
+                'Must provide one of the `api_key` argument or the `AZURE_OPENAI_API_KEY` environment variable'
             )

         if not api_version and 'OPENAI_API_VERSION' not in os.environ:  # pragma: no cover
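The checks above show the configuration AzureProvider expects: an endpoint, an API key and an API version, each supplied as an argument or via AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY and OPENAI_API_VERSION. A sketch of explicit construction with an OpenAIModel; the api_version argument name is an assumption (only azure_endpoint and api_key appear in this hunk), and the endpoint, key, version and deployment name are placeholders:

    from pydantic_ai.models.openai import OpenAIModel
    from pydantic_ai.providers.azure import AzureProvider

    provider = AzureProvider(
        azure_endpoint='https://my-resource.openai.azure.com',  # placeholder endpoint
        api_key='...',                                          # placeholder key
        api_version='2024-10-21',                               # placeholder version string
    )
    model = OpenAIModel('gpt-4o', provider=provider)            # illustrative deployment name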
{pydantic_ai_slim-0.0.54.dist-info → pydantic_ai_slim-0.0.55.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.54
+Version: 0.0.55
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.0.54
+Requires-Dist: pydantic-graph==0.0.55
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.0.54; extra == 'evals'
+Requires-Dist: pydantic-evals==0.0.55; extra == 'evals'
 Provides-Extra: groq
 Requires-Dist: groq>=0.15.0; extra == 'groq'
 Provides-Extra: logfire
{pydantic_ai_slim-0.0.54.dist-info → pydantic_ai_slim-0.0.55.dist-info}/RECORD
CHANGED
@@ -8,7 +8,7 @@ pydantic_ai/_pydantic.py,sha256=12hX5hON88meO1QxbWrEPXSvr6RTNgr6ubKY6KRwab4,8890
 pydantic_ai/_result.py,sha256=9cDWMiXv3ef_qPywv02SAH_8r3SS5FVfxm5iAYDI_is,10375
 pydantic_ai/_system_prompt.py,sha256=602c2jyle2R_SesOrITBDETZqsLk4BZ8Cbo8yEhmx04,1120
 pydantic_ai/_utils.py,sha256=z2NtK11U3_N6kLIKTPUj830vGusbj6FvfYEc0WCtfsI,9893
-pydantic_ai/agent.py,sha256=
+pydantic_ai/agent.py,sha256=CEKFn_uD5ZMLxKNOwa01OgYxpYa1imtdPRwOq_GxSEs,70797
 pydantic_ai/exceptions.py,sha256=gvbFsFkAzSXOo_d1nfjy09kDHUGv1j5q70Uk-wKYGi8,3167
 pydantic_ai/format_as_xml.py,sha256=QE7eMlg5-YUMw1_2kcI3h0uKYPZZyGkgXFDtfZTMeeI,4480
 pydantic_ai/mcp.py,sha256=wlu3GCdcjgsag75cyuTron1kdTQOWkM0j5hNJxf0JkE,8242
@@ -21,22 +21,22 @@ pydantic_ai/usage.py,sha256=9sqoIv_RVVUhKXQScTDqUJc074gifsuSzc9_NOt7C3g,5394
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/common_tools/duckduckgo.py,sha256=Iw8Dl2YQ28S483mzfa8CXs-dc-ujS8un085R2O6oOEw,2241
 pydantic_ai/common_tools/tavily.py,sha256=h8deBDrpG-8BGzydM_zXs7z1ASrhdVvUxL4-CAbncBo,2589
-pydantic_ai/models/__init__.py,sha256
-pydantic_ai/models/anthropic.py,sha256=
+pydantic_ai/models/__init__.py,sha256=-gLWDsZuzdCBC9sMMCX9P1eVigywM0LK-BBIVoPq1VA,18888
+pydantic_ai/models/anthropic.py,sha256=8YI4s1PfrbI_t8QDMsiXZ0bL7WBdAtUMCtfYuu0qJUI,20146
 pydantic_ai/models/bedrock.py,sha256=TPycUfKFPcSicUJHdsxE4j1W0pJbjFpW3XMAyb546_s,20777
 pydantic_ai/models/cohere.py,sha256=o4QXvPA7QHigNm0TrIuJeJXfJjSpHr8AHPvO-IO3aIk,11386
 pydantic_ai/models/fallback.py,sha256=y0bYXM3DfzJNAsyyMzclt33lzZazL-5_hwdgc33gfuM,4876
 pydantic_ai/models/function.py,sha256=HUSgPB3mKVfYI0OSJJJJRiQN-yeewjYIbrtrPfsvlgI,11365
 pydantic_ai/models/gemini.py,sha256=GD-K8ZZHU9t89pjlEezanpE-n_UbYmpm8TTln4Fpn-Q,34090
-pydantic_ai/models/groq.py,sha256=
+pydantic_ai/models/groq.py,sha256=RzKkU0b3nF__6F5eLCCqXb-xDpX3ZD9Ky53DbtmRfhM,16668
 pydantic_ai/models/instrumented.py,sha256=ErFRDiOehOYlJBp4mSNj7yEIMtMqjlGcamEAwgW_Il4,11163
-pydantic_ai/models/mistral.py,sha256=
-pydantic_ai/models/openai.py,sha256=
+pydantic_ai/models/mistral.py,sha256=0MvCVD-HVJpD9dtybXY8dHN4AVzbn4oD-1cJ889QLZE,27841
+pydantic_ai/models/openai.py,sha256=QHYF-gij7lXGZLht8zH8WormIG_TqwuKqIDWe2Ze0ek,48108
 pydantic_ai/models/test.py,sha256=qQ8ZIaVRdbJv-tKGu6lrdakVAhOsTlyf68TFWyGwOWE,16861
-pydantic_ai/models/wrapper.py,sha256=
-pydantic_ai/providers/__init__.py,sha256=
+pydantic_ai/models/wrapper.py,sha256=8wm4RF-MRZOxRVLefwMsxToopCX5Y4Xq2-Ugs5MtCK4,1710
+pydantic_ai/providers/__init__.py,sha256=lLlHq6B8qmu6Ag5biaZmJGDKELO46KjwP7-CDrz_T4Y,2592
 pydantic_ai/providers/anthropic.py,sha256=0WzWEDseBaJ5eyEatvnDXBtDZKA9-od4BuPZn9NoTPw,2812
-pydantic_ai/providers/azure.py,sha256=
+pydantic_ai/providers/azure.py,sha256=2tAE-bLjXY-DvVrVc4ilQe15HhoHP9neAbvwCaCx_uo,4225
 pydantic_ai/providers/bedrock.py,sha256=BV1Zi4asU4Bmcv4t7VRIy2U44Tk_Jrf26x8_mPJiYHQ,3216
 pydantic_ai/providers/cohere.py,sha256=WOFZCllgVbWciF4nNkG3pCqw4poy57VEGyux2mVntbQ,2667
 pydantic_ai/providers/deepseek.py,sha256=_5JPzDGWsyVyTBX-yYYdy5aZwUOWNCVgoWI-UoBamms,2193
@@ -45,7 +45,7 @@ pydantic_ai/providers/google_vertex.py,sha256=WAwPxKTARVzs8DFs2veEUOJSur0krDOo9-
 pydantic_ai/providers/groq.py,sha256=DoY6qkfhuemuKB5JXhUkqG-3t1HQkxwSXoE_kHQIAK0,2788
 pydantic_ai/providers/mistral.py,sha256=fcR1uSwORo0jtevX7-wOjvcfT8ojMAaKY81uN5uYymM,2661
 pydantic_ai/providers/openai.py,sha256=ePF-QWwLkGkSE5w245gTTDVR3VoTIUqFoIhQ0TAoUiA,2866
-pydantic_ai_slim-0.0.
-pydantic_ai_slim-0.0.
-pydantic_ai_slim-0.0.
-pydantic_ai_slim-0.0.
+pydantic_ai_slim-0.0.55.dist-info/METADATA,sha256=aLLABT9Xs5HIHwQe4akRbffwIGp_NGIXnckwTVaxZKA,3555
+pydantic_ai_slim-0.0.55.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-0.0.55.dist-info/entry_points.txt,sha256=KxQSmlMS8GMTkwTsl4_q9a5nJvBjj3HWeXx688wLrKg,45
+pydantic_ai_slim-0.0.55.dist-info/RECORD,,
{pydantic_ai_slim-0.0.54.dist-info → pydantic_ai_slim-0.0.55.dist-info}/WHEEL
File without changes
{pydantic_ai_slim-0.0.54.dist-info → pydantic_ai_slim-0.0.55.dist-info}/entry_points.txt
File without changes