pydantic-ai-slim 0.0.54__tar.gz → 0.0.55__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of pydantic-ai-slim might be problematic; see the registry's advisory page for details.

Files changed (51):
  1. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/PKG-INFO +3 -3
  2. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/agent.py +3 -3
  3. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/__init__.py +1 -1
  4. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/anthropic.py +9 -1
  5. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/groq.py +2 -1
  6. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/mistral.py +5 -0
  7. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/openai.py +3 -0
  8. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/wrapper.py +3 -0
  9. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/__init__.py +4 -0
  10. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/azure.py +2 -2
  11. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/.gitignore +0 -0
  12. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/README.md +0 -0
  13. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/__init__.py +0 -0
  14. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/__main__.py +0 -0
  15. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/_agent_graph.py +0 -0
  16. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/_cli.py +0 -0
  17. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/_griffe.py +0 -0
  18. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/_parts_manager.py +0 -0
  19. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/_pydantic.py +0 -0
  20. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/_result.py +0 -0
  21. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/_system_prompt.py +0 -0
  22. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/_utils.py +0 -0
  23. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/common_tools/__init__.py +0 -0
  24. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  25. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/common_tools/tavily.py +0 -0
  26. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/exceptions.py +0 -0
  27. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/format_as_xml.py +0 -0
  28. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/mcp.py +0 -0
  29. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/messages.py +0 -0
  30. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/bedrock.py +0 -0
  31. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/cohere.py +0 -0
  32. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/fallback.py +0 -0
  33. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/function.py +0 -0
  34. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/gemini.py +0 -0
  35. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/instrumented.py +0 -0
  36. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/models/test.py +0 -0
  37. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/anthropic.py +0 -0
  38. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/bedrock.py +0 -0
  39. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/cohere.py +0 -0
  40. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/deepseek.py +0 -0
  41. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/google_gla.py +0 -0
  42. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/google_vertex.py +0 -0
  43. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/groq.py +0 -0
  44. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/mistral.py +0 -0
  45. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/providers/openai.py +0 -0
  46. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/py.typed +0 -0
  47. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/result.py +0 -0
  48. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/settings.py +0 -0
  49. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/tools.py +0 -0
  50. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pydantic_ai/usage.py +0 -0
  51. {pydantic_ai_slim-0.0.54 → pydantic_ai_slim-0.0.55}/pyproject.toml +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-slim
3
- Version: 0.0.54
3
+ Version: 0.0.55
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
5
5
  Author-email: Samuel Colvin <samuel@pydantic.dev>
6
6
  License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
29
29
  Requires-Dist: griffe>=1.3.2
30
30
  Requires-Dist: httpx>=0.27
31
31
  Requires-Dist: opentelemetry-api>=1.28.0
32
- Requires-Dist: pydantic-graph==0.0.54
32
+ Requires-Dist: pydantic-graph==0.0.55
33
33
  Requires-Dist: pydantic>=2.10
34
34
  Requires-Dist: typing-inspection>=0.4.0
35
35
  Provides-Extra: anthropic
@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
45
45
  Provides-Extra: duckduckgo
46
46
  Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
47
47
  Provides-Extra: evals
48
- Requires-Dist: pydantic-evals==0.0.54; extra == 'evals'
48
+ Requires-Dist: pydantic-evals==0.0.55; extra == 'evals'
49
49
  Provides-Extra: groq
50
50
  Requires-Dist: groq>=0.15.0; extra == 'groq'
51
51
  Provides-Extra: logfire
@@ -592,7 +592,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
592
592
  @overload
593
593
  def run_stream(
594
594
  self,
595
- user_prompt: str | Sequence[_messages.UserContent],
595
+ user_prompt: str | Sequence[_messages.UserContent] | None = None,
596
596
  *,
597
597
  result_type: None = None,
598
598
  message_history: list[_messages.ModelMessage] | None = None,
@@ -607,7 +607,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
607
607
  @overload
608
608
  def run_stream(
609
609
  self,
610
- user_prompt: str | Sequence[_messages.UserContent],
610
+ user_prompt: str | Sequence[_messages.UserContent] | None = None,
611
611
  *,
612
612
  result_type: type[RunResultDataT],
613
613
  message_history: list[_messages.ModelMessage] | None = None,
@@ -622,7 +622,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
622
622
  @asynccontextmanager
623
623
  async def run_stream( # noqa C901
624
624
  self,
625
- user_prompt: str | Sequence[_messages.UserContent],
625
+ user_prompt: str | Sequence[_messages.UserContent] | None = None,
626
626
  *,
627
627
  result_type: type[RunResultDataT] | None = None,
628
628
  message_history: list[_messages.ModelMessage] | None = None,
@@ -427,7 +427,7 @@ def infer_model(model: Model | KnownModelName | str) -> Model:
427
427
  from .cohere import CohereModel
428
428
 
429
429
  return CohereModel(model_name, provider=provider)
430
- elif provider in ('deepseek', 'openai'):
430
+ elif provider in ('deepseek', 'openai', 'azure'):
431
431
  from .openai import OpenAIModel
432
432
 
433
433
  return OpenAIModel(model_name, provider=provider)
@@ -31,7 +31,14 @@ from ..messages import (
31
31
  from ..providers import Provider, infer_provider
32
32
  from ..settings import ModelSettings
33
33
  from ..tools import ToolDefinition
34
- from . import Model, ModelRequestParameters, StreamedResponse, cached_async_http_client, check_allow_model_requests
34
+ from . import (
35
+ Model,
36
+ ModelRequestParameters,
37
+ StreamedResponse,
38
+ cached_async_http_client,
39
+ check_allow_model_requests,
40
+ get_user_agent,
41
+ )
35
42
 
36
43
  try:
37
44
  from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropic, AsyncStream
@@ -231,6 +238,7 @@ class AnthropicModel(Model):
231
238
  top_p=model_settings.get('top_p', NOT_GIVEN),
232
239
  timeout=model_settings.get('timeout', NOT_GIVEN),
233
240
  metadata=model_settings.get('anthropic_metadata', NOT_GIVEN),
241
+ extra_headers={'User-Agent': get_user_agent()},
234
242
  )
235
243
  except APIStatusError as e:
236
244
  if (status_code := e.status_code) >= 400:
@@ -31,7 +31,7 @@ from ..messages import (
31
31
  from ..providers import Provider, infer_provider
32
32
  from ..settings import ModelSettings
33
33
  from ..tools import ToolDefinition
34
- from . import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests
34
+ from . import Model, ModelRequestParameters, StreamedResponse, check_allow_model_requests, get_user_agent
35
35
 
36
36
  try:
37
37
  from groq import NOT_GIVEN, APIStatusError, AsyncGroq, AsyncStream
@@ -218,6 +218,7 @@ class GroqModel(Model):
218
218
  presence_penalty=model_settings.get('presence_penalty', NOT_GIVEN),
219
219
  frequency_penalty=model_settings.get('frequency_penalty', NOT_GIVEN),
220
220
  logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
221
+ extra_headers={'User-Agent': get_user_agent()},
221
222
  )
222
223
  except APIStatusError as e:
223
224
  if (status_code := e.status_code) >= 400:
@@ -39,6 +39,7 @@ from . import (
39
39
  ModelRequestParameters,
40
40
  StreamedResponse,
41
41
  check_allow_model_requests,
42
+ get_user_agent,
42
43
  )
43
44
 
44
45
  try:
@@ -200,6 +201,7 @@ class MistralModel(Model):
200
201
  timeout_ms=self._get_timeout_ms(model_settings.get('timeout')),
201
202
  random_seed=model_settings.get('seed', UNSET),
202
203
  stop=model_settings.get('stop_sequences', None),
204
+ http_headers={'User-Agent': get_user_agent()},
203
205
  )
204
206
  except SDKError as e:
205
207
  if (status_code := e.status_code) >= 400:
@@ -238,6 +240,7 @@ class MistralModel(Model):
238
240
  presence_penalty=model_settings.get('presence_penalty'),
239
241
  frequency_penalty=model_settings.get('frequency_penalty'),
240
242
  stop=model_settings.get('stop_sequences', None),
243
+ http_headers={'User-Agent': get_user_agent()},
241
244
  )
242
245
 
243
246
  elif model_request_parameters.result_tools:
@@ -251,6 +254,7 @@ class MistralModel(Model):
251
254
  messages=mistral_messages,
252
255
  response_format={'type': 'json_object'},
253
256
  stream=True,
257
+ http_headers={'User-Agent': get_user_agent()},
254
258
  )
255
259
 
256
260
  else:
@@ -259,6 +263,7 @@ class MistralModel(Model):
259
263
  model=str(self._model_name),
260
264
  messages=mistral_messages,
261
265
  stream=True,
266
+ http_headers={'User-Agent': get_user_agent()},
262
267
  )
263
268
  assert response, 'A unexpected empty response from Mistral.'
264
269
  return response
@@ -39,6 +39,7 @@ from . import (
39
39
  StreamedResponse,
40
40
  cached_async_http_client,
41
41
  check_allow_model_requests,
42
+ get_user_agent,
42
43
  )
43
44
 
44
45
  try:
@@ -282,6 +283,7 @@ class OpenAIModel(Model):
282
283
  logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
283
284
  reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
284
285
  user=model_settings.get('openai_user', NOT_GIVEN),
286
+ extra_headers={'User-Agent': get_user_agent()},
285
287
  )
286
288
  except APIStatusError as e:
287
289
  if (status_code := e.status_code) >= 400:
@@ -613,6 +615,7 @@ class OpenAIResponsesModel(Model):
613
615
  timeout=model_settings.get('timeout', NOT_GIVEN),
614
616
  reasoning=reasoning,
615
617
  user=model_settings.get('openai_user', NOT_GIVEN),
618
+ extra_headers={'User-Agent': get_user_agent()},
616
619
  )
617
620
  except APIStatusError as e:
618
621
  if (status_code := e.status_code) >= 400:
@@ -37,6 +37,9 @@ class WrapperModel(Model):
37
37
  async with self.wrapped.request_stream(messages, model_settings, model_request_parameters) as response_stream:
38
38
  yield response_stream
39
39
 
40
+ def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
41
+ return self.wrapped.customize_request_parameters(model_request_parameters)
42
+
40
43
  @property
41
44
  def model_name(self) -> str:
42
45
  return self.wrapped.model_name
@@ -52,6 +52,10 @@ def infer_provider(provider: str) -> Provider[Any]:
52
52
  from .deepseek import DeepSeekProvider
53
53
 
54
54
  return DeepSeekProvider()
55
+ elif provider == 'azure':
56
+ from .azure import AzureProvider
57
+
58
+ return AzureProvider()
55
59
  elif provider == 'google-vertex':
56
60
  from .google_vertex import GoogleVertexProvider
57
61
 
@@ -87,9 +87,9 @@ class AzureProvider(Provider[AsyncOpenAI]):
87
87
  'Must provide one of the `azure_endpoint` argument or the `AZURE_OPENAI_ENDPOINT` environment variable'
88
88
  )
89
89
 
90
- if not api_key and 'OPENAI_API_KEY' not in os.environ: # pragma: no cover
90
+ if not api_key and 'AZURE_OPENAI_API_KEY' not in os.environ: # pragma: no cover
91
91
  raise UserError(
92
- 'Must provide one of the `api_key` argument or the `OPENAI_API_KEY` environment variable'
92
+ 'Must provide one of the `api_key` argument or the `AZURE_OPENAI_API_KEY` environment variable'
93
93
  )
94
94
 
95
95
  if not api_version and 'OPENAI_API_VERSION' not in os.environ: # pragma: no cover