pydantic-ai-slim 0.0.39__tar.gz → 0.0.41__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic; see the package registry listing for more details.

Files changed (49)
  1. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/PKG-INFO +2 -2
  2. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/__init__.py +8 -2
  3. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/anthropic.py +44 -12
  4. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/bedrock.py +2 -2
  5. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/cohere.py +2 -2
  6. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/fallback.py +21 -8
  7. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/function.py +2 -2
  8. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/gemini.py +5 -7
  9. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/groq.py +4 -5
  10. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/instrumented.py +2 -8
  11. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/mistral.py +39 -7
  12. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/openai.py +9 -10
  13. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/test.py +2 -2
  14. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/vertexai.py +2 -2
  15. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/models/wrapper.py +1 -1
  16. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/providers/__init__.py +8 -0
  17. pydantic_ai_slim-0.0.41/pydantic_ai/providers/anthropic.py +74 -0
  18. pydantic_ai_slim-0.0.41/pydantic_ai/providers/azure.py +108 -0
  19. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/providers/bedrock.py +1 -1
  20. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/providers/deepseek.py +1 -1
  21. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/providers/google_vertex.py +1 -1
  22. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/providers/groq.py +1 -3
  23. pydantic_ai_slim-0.0.41/pydantic_ai/providers/mistral.py +73 -0
  24. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/providers/openai.py +2 -5
  25. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pyproject.toml +2 -2
  26. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/.gitignore +0 -0
  27. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/README.md +0 -0
  28. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/__init__.py +0 -0
  29. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/_agent_graph.py +0 -0
  30. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/_cli.py +0 -0
  31. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/_griffe.py +0 -0
  32. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/_parts_manager.py +0 -0
  33. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/_pydantic.py +0 -0
  34. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/_result.py +0 -0
  35. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/_system_prompt.py +0 -0
  36. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/_utils.py +0 -0
  37. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/agent.py +1 -1
  38. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/common_tools/__init__.py +0 -0
  39. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  40. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/common_tools/tavily.py +0 -0
  41. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/exceptions.py +0 -0
  42. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/format_as_xml.py +0 -0
  43. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/messages.py +0 -0
  44. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/providers/google_gla.py +0 -0
  45. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/py.typed +0 -0
  46. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/result.py +0 -0
  47. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/settings.py +0 -0
  48. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/tools.py +0 -0
  49. {pydantic_ai_slim-0.0.39 → pydantic_ai_slim-0.0.41}/pydantic_ai/usage.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-slim
3
- Version: 0.0.39
3
+ Version: 0.0.41
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
5
5
  Author-email: Samuel Colvin <samuel@pydantic.dev>
6
6
  License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
29
29
  Requires-Dist: griffe>=1.3.2
30
30
  Requires-Dist: httpx>=0.27
31
31
  Requires-Dist: opentelemetry-api>=1.28.0
32
- Requires-Dist: pydantic-graph==0.0.39
32
+ Requires-Dist: pydantic-graph==0.0.41
33
33
  Requires-Dist: pydantic>=2.10
34
34
  Requires-Dist: typing-inspection>=0.4.0
35
35
  Provides-Extra: anthropic
@@ -262,8 +262,14 @@ class Model(ABC):
262
262
 
263
263
  @property
264
264
  @abstractmethod
265
- def system(self) -> str | None:
266
- """The system / model provider, ex: openai."""
265
+ def system(self) -> str:
266
+ """The system / model provider, ex: openai.
267
+
268
+ Use to populate the `gen_ai.system` OpenTelemetry semantic convention attribute,
269
+ so should use well-known values listed in
270
+ https://opentelemetry.io/docs/specs/semconv/attributes-registry/gen-ai/#gen-ai-system
271
+ when applicable.
272
+ """
267
273
  raise NotImplementedError()
268
274
 
269
275
  @property
@@ -11,7 +11,7 @@ from typing import Any, Literal, Union, cast, overload
11
11
 
12
12
  from anthropic.types import DocumentBlockParam
13
13
  from httpx import AsyncClient as AsyncHTTPClient
14
- from typing_extensions import assert_never
14
+ from typing_extensions import assert_never, deprecated
15
15
 
16
16
  from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
17
17
  from .._utils import guard_tool_call_id as _guard_tool_call_id
@@ -31,15 +31,10 @@ from ..messages import (
31
31
  ToolReturnPart,
32
32
  UserPromptPart,
33
33
  )
34
+ from ..providers import Provider, infer_provider
34
35
  from ..settings import ModelSettings
35
36
  from ..tools import ToolDefinition
36
- from . import (
37
- Model,
38
- ModelRequestParameters,
39
- StreamedResponse,
40
- cached_async_http_client,
41
- check_allow_model_requests,
42
- )
37
+ from . import Model, ModelRequestParameters, StreamedResponse, cached_async_http_client, check_allow_model_requests
43
38
 
44
39
  try:
45
40
  from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropic, AsyncStream
@@ -115,12 +110,33 @@ class AnthropicModel(Model):
115
110
  client: AsyncAnthropic = field(repr=False)
116
111
 
117
112
  _model_name: AnthropicModelName = field(repr=False)
118
- _system: str | None = field(default='anthropic', repr=False)
113
+ _system: str = field(default='anthropic', repr=False)
114
+
115
+ @overload
116
+ def __init__(
117
+ self,
118
+ model_name: AnthropicModelName,
119
+ *,
120
+ provider: Literal['anthropic'] | Provider[AsyncAnthropic] = 'anthropic',
121
+ ) -> None: ...
119
122
 
123
+ @deprecated('Use the `provider` parameter instead of `api_key`, `anthropic_client`, and `http_client`.')
124
+ @overload
120
125
  def __init__(
121
126
  self,
122
127
  model_name: AnthropicModelName,
123
128
  *,
129
+ provider: None = None,
130
+ api_key: str | None = None,
131
+ anthropic_client: AsyncAnthropic | None = None,
132
+ http_client: AsyncHTTPClient | None = None,
133
+ ) -> None: ...
134
+
135
+ def __init__(
136
+ self,
137
+ model_name: AnthropicModelName,
138
+ *,
139
+ provider: Literal['anthropic'] | Provider[AsyncAnthropic] | None = None,
124
140
  api_key: str | None = None,
125
141
  anthropic_client: AsyncAnthropic | None = None,
126
142
  http_client: AsyncHTTPClient | None = None,
@@ -130,6 +146,8 @@ class AnthropicModel(Model):
130
146
  Args:
131
147
  model_name: The name of the Anthropic model to use. List of model names available
132
148
  [here](https://docs.anthropic.com/en/docs/about-claude/models).
149
+ provider: The provider to use for the Anthropic API. Can be either the string 'anthropic' or an
150
+ instance of `Provider[AsyncAnthropic]`. If not provided, the other parameters will be used.
133
151
  api_key: The API key to use for authentication, if not provided, the `ANTHROPIC_API_KEY` environment variable
134
152
  will be used if available.
135
153
  anthropic_client: An existing
@@ -138,7 +156,12 @@ class AnthropicModel(Model):
138
156
  http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
139
157
  """
140
158
  self._model_name = model_name
141
- if anthropic_client is not None:
159
+
160
+ if provider is not None:
161
+ if isinstance(provider, str):
162
+ provider = infer_provider(provider)
163
+ self.client = provider.client
164
+ elif anthropic_client is not None:
142
165
  assert http_client is None, 'Cannot provide both `anthropic_client` and `http_client`'
143
166
  assert api_key is None, 'Cannot provide both `anthropic_client` and `api_key`'
144
167
  self.client = anthropic_client
@@ -183,7 +206,7 @@ class AnthropicModel(Model):
183
206
  return self._model_name
184
207
 
185
208
  @property
186
- def system(self) -> str | None:
209
+ def system(self) -> str:
187
210
  """The system / model provider."""
188
211
  return self._system
189
212
 
@@ -355,8 +378,17 @@ class AnthropicModel(Model):
355
378
  source={'data': io.BytesIO(item.data), 'media_type': item.media_type, 'type': 'base64'}, # type: ignore
356
379
  type='image',
357
380
  )
381
+ elif item.media_type == 'application/pdf':
382
+ yield DocumentBlockParam(
383
+ source=Base64PDFSourceParam(
384
+ data=io.BytesIO(item.data),
385
+ media_type='application/pdf',
386
+ type='base64',
387
+ ),
388
+ type='document',
389
+ )
358
390
  else:
359
- raise RuntimeError('Only images are supported for binary content')
391
+ raise RuntimeError('Only images and PDFs are supported for binary content')
360
392
  elif isinstance(item, ImageUrl):
361
393
  try:
362
394
  response = await cached_async_http_client().get(item.url)
@@ -119,7 +119,7 @@ class BedrockConverseModel(Model):
119
119
  client: BedrockRuntimeClient
120
120
 
121
121
  _model_name: BedrockModelName = field(repr=False)
122
- _system: str | None = field(default='bedrock', repr=False)
122
+ _system: str = field(default='bedrock', repr=False)
123
123
 
124
124
  @property
125
125
  def model_name(self) -> str:
@@ -127,7 +127,7 @@ class BedrockConverseModel(Model):
127
127
  return self._model_name
128
128
 
129
129
  @property
130
- def system(self) -> str | None:
130
+ def system(self) -> str:
131
131
  """The system / model provider, ex: openai."""
132
132
  return self._system
133
133
 
@@ -98,7 +98,7 @@ class CohereModel(Model):
98
98
  client: AsyncClientV2 = field(repr=False)
99
99
 
100
100
  _model_name: CohereModelName = field(repr=False)
101
- _system: str | None = field(default='cohere', repr=False)
101
+ _system: str = field(default='cohere', repr=False)
102
102
 
103
103
  def __init__(
104
104
  self,
@@ -148,7 +148,7 @@ class CohereModel(Model):
148
148
  return self._model_name
149
149
 
150
150
  @property
151
- def system(self) -> str | None:
151
+ def system(self) -> str:
152
152
  """The system / model provider."""
153
153
  return self._system
154
154
 
@@ -1,10 +1,14 @@
1
1
  from __future__ import annotations as _annotations
2
2
 
3
3
  from collections.abc import AsyncIterator
4
- from contextlib import AsyncExitStack, asynccontextmanager
4
+ from contextlib import AsyncExitStack, asynccontextmanager, suppress
5
5
  from dataclasses import dataclass, field
6
6
  from typing import TYPE_CHECKING, Callable
7
7
 
8
+ from opentelemetry.trace import get_current_span
9
+
10
+ from pydantic_ai.models.instrumented import InstrumentedModel
11
+
8
12
  from ..exceptions import FallbackExceptionGroup, ModelHTTPError
9
13
  from . import KnownModelName, Model, ModelRequestParameters, StreamedResponse, infer_model
10
14
 
@@ -40,7 +44,6 @@ class FallbackModel(Model):
40
44
  fallback_on: A callable or tuple of exceptions that should trigger a fallback.
41
45
  """
42
46
  self.models = [infer_model(default_model), *[infer_model(m) for m in fallback_models]]
43
- self._model_name = f'FallBackModel[{", ".join(model.model_name for model in self.models)}]'
44
47
 
45
48
  if isinstance(fallback_on, tuple):
46
49
  self._fallback_on = _default_fallback_condition_factory(fallback_on)
@@ -62,14 +65,15 @@ class FallbackModel(Model):
62
65
  for model in self.models:
63
66
  try:
64
67
  response, usage = await model.request(messages, model_settings, model_request_parameters)
65
- response.model_used = model # type: ignore
66
- return response, usage
67
68
  except Exception as exc:
68
69
  if self._fallback_on(exc):
69
70
  exceptions.append(exc)
70
71
  continue
71
72
  raise exc
72
73
 
74
+ self._set_span_attributes(model)
75
+ return response, usage
76
+
73
77
  raise FallbackExceptionGroup('All models from FallbackModel failed', exceptions)
74
78
 
75
79
  @asynccontextmanager
@@ -93,20 +97,29 @@ class FallbackModel(Model):
93
97
  exceptions.append(exc)
94
98
  continue
95
99
  raise exc
100
+
101
+ self._set_span_attributes(model)
96
102
  yield response
97
103
  return
98
104
 
99
105
  raise FallbackExceptionGroup('All models from FallbackModel failed', exceptions)
100
106
 
107
+ def _set_span_attributes(self, model: Model):
108
+ with suppress(Exception):
109
+ span = get_current_span()
110
+ if span.is_recording():
111
+ attributes = getattr(span, 'attributes', {})
112
+ if attributes.get('gen_ai.request.model') == self.model_name:
113
+ span.set_attributes(InstrumentedModel.model_attributes(model))
114
+
101
115
  @property
102
116
  def model_name(self) -> str:
103
117
  """The model name."""
104
- return self._model_name
118
+ return f'fallback:{",".join(model.model_name for model in self.models)}'
105
119
 
106
120
  @property
107
- def system(self) -> str | None:
108
- """The system / model provider, n/a for fallback models."""
109
- return None
121
+ def system(self) -> str:
122
+ return f'fallback:{",".join(model.system for model in self.models)}'
110
123
 
111
124
  @property
112
125
  def base_url(self) -> str | None:
@@ -45,7 +45,7 @@ class FunctionModel(Model):
45
45
  stream_function: StreamFunctionDef | None = None
46
46
 
47
47
  _model_name: str = field(repr=False)
48
- _system: str | None = field(default=None, repr=False)
48
+ _system: str = field(default='function', repr=False)
49
49
 
50
50
  @overload
51
51
  def __init__(self, function: FunctionDef, *, model_name: str | None = None) -> None: ...
@@ -140,7 +140,7 @@ class FunctionModel(Model):
140
140
  return self._model_name
141
141
 
142
142
  @property
143
- def system(self) -> str | None:
143
+ def system(self) -> str:
144
144
  """The system / model provider."""
145
145
  return self._system
146
146
 
@@ -91,7 +91,7 @@ class GeminiModel(Model):
91
91
  _provider: Literal['google-gla', 'google-vertex'] | Provider[AsyncHTTPClient] | None = field(repr=False)
92
92
  _auth: AuthProtocol | None = field(repr=False)
93
93
  _url: str | None = field(repr=False)
94
- _system: str | None = field(default='google-gla', repr=False)
94
+ _system: str = field(default='gemini', repr=False)
95
95
 
96
96
  @overload
97
97
  def __init__(
@@ -139,11 +139,9 @@ class GeminiModel(Model):
139
139
 
140
140
  if provider is not None:
141
141
  if isinstance(provider, str):
142
- self._system = provider
143
- self.client = infer_provider(provider).client
144
- else:
145
- self._system = provider.name
146
- self.client = provider.client
142
+ provider = infer_provider(provider)
143
+ self._system = provider.name
144
+ self.client = provider.client
147
145
  self._url = str(self.client.base_url)
148
146
  else:
149
147
  if api_key is None:
@@ -197,7 +195,7 @@ class GeminiModel(Model):
197
195
  return self._model_name
198
196
 
199
197
  @property
200
- def system(self) -> str | None:
198
+ def system(self) -> str:
201
199
  """The system / model provider."""
202
200
  return self._system
203
201
 
@@ -88,7 +88,7 @@ class GroqModel(Model):
88
88
  client: AsyncGroq = field(repr=False)
89
89
 
90
90
  _model_name: GroqModelName = field(repr=False)
91
- _system: str | None = field(default='groq', repr=False)
91
+ _system: str = field(default='groq', repr=False)
92
92
 
93
93
  @overload
94
94
  def __init__(
@@ -138,9 +138,8 @@ class GroqModel(Model):
138
138
 
139
139
  if provider is not None:
140
140
  if isinstance(provider, str):
141
- self.client = infer_provider(provider).client
142
- else:
143
- self.client = provider.client
141
+ provider = infer_provider(provider)
142
+ self.client = provider.client
144
143
  elif groq_client is not None:
145
144
  assert http_client is None, 'Cannot provide both `groq_client` and `http_client`'
146
145
  assert api_key is None, 'Cannot provide both `groq_client` and `api_key`'
@@ -186,7 +185,7 @@ class GroqModel(Model):
186
185
  return self._model_name
187
186
 
188
187
  @property
189
- def system(self) -> str | None:
188
+ def system(self) -> str:
190
189
  """The system / model provider."""
191
190
  return self._system
192
191
 
@@ -175,11 +175,7 @@ class InstrumentedModel(WrapperModel):
175
175
  )
176
176
  )
177
177
  new_attributes: dict[str, AttributeValue] = usage.opentelemetry_attributes() # type: ignore
178
- if model_used := getattr(response, 'model_used', None):
179
- # FallbackModel sets model_used on the response so that we can report the attributes
180
- # of the model that was actually used.
181
- new_attributes.update(self.model_attributes(model_used))
182
- attributes.update(new_attributes)
178
+ attributes.update(getattr(span, 'attributes', {}))
183
179
  request_model = attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]
184
180
  new_attributes['gen_ai.response.model'] = response.model_name or request_model
185
181
  span.set_attributes(new_attributes)
@@ -213,10 +209,8 @@ class InstrumentedModel(WrapperModel):
213
209
 
214
210
  @staticmethod
215
211
  def model_attributes(model: Model):
216
- system = getattr(model, 'system', '') or model.__class__.__name__.removesuffix('Model').lower()
217
- system = {'google-gla': 'gemini', 'google-vertex': 'vertex_ai', 'mistral': 'mistral_ai'}.get(system, system)
218
212
  attributes: dict[str, AttributeValue] = {
219
- GEN_AI_SYSTEM_ATTRIBUTE: system,
213
+ GEN_AI_SYSTEM_ATTRIBUTE: model.system,
220
214
  GEN_AI_REQUEST_MODEL_ATTRIBUTE: model.model_name,
221
215
  }
222
216
  if base_url := model.base_url:
@@ -7,11 +7,11 @@ from contextlib import asynccontextmanager
7
7
  from dataclasses import dataclass, field
8
8
  from datetime import datetime, timezone
9
9
  from itertools import chain
10
- from typing import Any, Callable, Literal, Union, cast
10
+ from typing import Any, Callable, Literal, Union, cast, overload
11
11
 
12
12
  import pydantic_core
13
13
  from httpx import AsyncClient as AsyncHTTPClient, Timeout
14
- from typing_extensions import assert_never
14
+ from typing_extensions import assert_never, deprecated
15
15
 
16
16
  from .. import ModelHTTPError, UnexpectedModelBehavior, _utils
17
17
  from .._utils import now_utc as _now_utc
@@ -31,6 +31,7 @@ from ..messages import (
31
31
  ToolReturnPart,
32
32
  UserPromptPart,
33
33
  )
34
+ from ..providers import Provider, infer_provider
34
35
  from ..result import Usage
35
36
  from ..settings import ModelSettings
36
37
  from ..tools import ToolDefinition
@@ -110,12 +111,35 @@ class MistralModel(Model):
110
111
  json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n"""
111
112
 
112
113
  _model_name: MistralModelName = field(repr=False)
113
- _system: str | None = field(default='mistral', repr=False)
114
+ _system: str = field(default='mistral_ai', repr=False)
114
115
 
116
+ @overload
115
117
  def __init__(
116
118
  self,
117
119
  model_name: MistralModelName,
118
120
  *,
121
+ provider: Literal['mistral'] | Provider[Mistral] = 'mistral',
122
+ json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n""",
123
+ ) -> None: ...
124
+
125
+ @overload
126
+ @deprecated('Use the `provider` parameter instead of `api_key`, `client` and `http_client`.')
127
+ def __init__(
128
+ self,
129
+ model_name: MistralModelName,
130
+ *,
131
+ provider: None = None,
132
+ api_key: str | Callable[[], str | None] | None = None,
133
+ client: Mistral | None = None,
134
+ http_client: AsyncHTTPClient | None = None,
135
+ json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n""",
136
+ ) -> None: ...
137
+
138
+ def __init__(
139
+ self,
140
+ model_name: MistralModelName,
141
+ *,
142
+ provider: Literal['mistral'] | Provider[Mistral] | None = None,
119
143
  api_key: str | Callable[[], str | None] | None = None,
120
144
  client: Mistral | None = None,
121
145
  http_client: AsyncHTTPClient | None = None,
@@ -124,6 +148,9 @@ class MistralModel(Model):
124
148
  """Initialize a Mistral model.
125
149
 
126
150
  Args:
151
+ provider: The provider to use for authentication and API access. Can be either the string
152
+ 'mistral' or an instance of `Provider[Mistral]`. If not provided, a new provider will be
153
+ created using the other parameters.
127
154
  model_name: The name of the model to use.
128
155
  api_key: The API key to use for authentication, if unset uses `MISTRAL_API_KEY` environment variable.
129
156
  client: An existing `Mistral` client to use, if provided, `api_key` and `http_client` must be `None`.
@@ -133,17 +160,22 @@ class MistralModel(Model):
133
160
  self._model_name = model_name
134
161
  self.json_mode_schema_prompt = json_mode_schema_prompt
135
162
 
136
- if client is not None:
163
+ if provider is not None:
164
+ if isinstance(provider, str):
165
+ # TODO(Marcelo): We should add an integration test with VCR when I get the API key.
166
+ provider = infer_provider(provider) # pragma: no cover
167
+ self.client = provider.client
168
+ elif client is not None:
137
169
  assert http_client is None, 'Cannot provide both `mistral_client` and `http_client`'
138
170
  assert api_key is None, 'Cannot provide both `mistral_client` and `api_key`'
139
171
  self.client = client
140
172
  else:
141
- api_key = os.getenv('MISTRAL_API_KEY') if api_key is None else api_key
173
+ api_key = api_key or os.getenv('MISTRAL_API_KEY')
142
174
  self.client = Mistral(api_key=api_key, async_client=http_client or cached_async_http_client())
143
175
 
144
176
  @property
145
177
  def base_url(self) -> str:
146
- return str(self.client.sdk_configuration.get_server_details()[0])
178
+ return self.client.sdk_configuration.get_server_details()[0]
147
179
 
148
180
  async def request(
149
181
  self,
@@ -179,7 +211,7 @@ class MistralModel(Model):
179
211
  return self._model_name
180
212
 
181
213
  @property
182
- def system(self) -> str | None:
214
+ def system(self) -> str:
183
215
  """The system / model provider."""
184
216
  return self._system
185
217
 
@@ -99,16 +99,16 @@ class OpenAIModel(Model):
99
99
  system_prompt_role: OpenAISystemPromptRole | None = field(default=None)
100
100
 
101
101
  _model_name: OpenAIModelName = field(repr=False)
102
- _system: str | None = field(repr=False)
102
+ _system: str = field(repr=False)
103
103
 
104
104
  @overload
105
105
  def __init__(
106
106
  self,
107
107
  model_name: OpenAIModelName,
108
108
  *,
109
- provider: Literal['openai', 'deepseek'] | Provider[AsyncOpenAI] = 'openai',
109
+ provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] = 'openai',
110
110
  system_prompt_role: OpenAISystemPromptRole | None = None,
111
- system: str | None = 'openai',
111
+ system: str = 'openai',
112
112
  ) -> None: ...
113
113
 
114
114
  @deprecated('Use the `provider` parameter instead of `base_url`, `api_key`, `openai_client` and `http_client`.')
@@ -123,20 +123,20 @@ class OpenAIModel(Model):
123
123
  openai_client: AsyncOpenAI | None = None,
124
124
  http_client: AsyncHTTPClient | None = None,
125
125
  system_prompt_role: OpenAISystemPromptRole | None = None,
126
- system: str | None = 'openai',
126
+ system: str = 'openai',
127
127
  ) -> None: ...
128
128
 
129
129
  def __init__(
130
130
  self,
131
131
  model_name: OpenAIModelName,
132
132
  *,
133
- provider: Literal['openai', 'deepseek'] | Provider[AsyncOpenAI] | None = None,
133
+ provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] | None = None,
134
134
  base_url: str | None = None,
135
135
  api_key: str | None = None,
136
136
  openai_client: AsyncOpenAI | None = None,
137
137
  http_client: AsyncHTTPClient | None = None,
138
138
  system_prompt_role: OpenAISystemPromptRole | None = None,
139
- system: str | None = 'openai',
139
+ system: str = 'openai',
140
140
  ):
141
141
  """Initialize an OpenAI model.
142
142
 
@@ -162,9 +162,8 @@ class OpenAIModel(Model):
162
162
 
163
163
  if provider is not None:
164
164
  if isinstance(provider, str):
165
- self.client = infer_provider(provider).client
166
- else:
167
- self.client = provider.client
165
+ provider = infer_provider(provider)
166
+ self.client = provider.client
168
167
  else: # pragma: no cover
169
168
  # This is a workaround for the OpenAI client requiring an API key, whilst locally served,
170
169
  # openai compatible models do not always need an API key, but a placeholder (non-empty) key is required.
@@ -224,7 +223,7 @@ class OpenAIModel(Model):
224
223
  return self._model_name
225
224
 
226
225
  @property
227
- def system(self) -> str | None:
226
+ def system(self) -> str:
228
227
  """The system / model provider."""
229
228
  return self._system
230
229
 
@@ -79,7 +79,7 @@ class TestModel(Model):
79
79
  This is set when a request is made, so will reflect the function tools from the last step of the last run.
80
80
  """
81
81
  _model_name: str = field(default='test', repr=False)
82
- _system: str | None = field(default=None, repr=False)
82
+ _system: str = field(default='test', repr=False)
83
83
 
84
84
  async def request(
85
85
  self,
@@ -113,7 +113,7 @@ class TestModel(Model):
113
113
  return self._model_name
114
114
 
115
115
  @property
116
- def system(self) -> str | None:
116
+ def system(self) -> str:
117
117
  """The system / model provider."""
118
118
  return self._system
119
119
 
@@ -69,7 +69,7 @@ class VertexAIModel(GeminiModel):
69
69
  url_template: str
70
70
 
71
71
  _model_name: GeminiModelName = field(repr=False)
72
- _system: str | None = field(default='google-vertex', repr=False)
72
+ _system: str = field(default='vertex_ai', repr=False)
73
73
 
74
74
  # TODO __init__ can be removed once we drop 3.9 and we can set kw_only correctly on the dataclass
75
75
  def __init__(
@@ -175,7 +175,7 @@ class VertexAIModel(GeminiModel):
175
175
  return self._model_name
176
176
 
177
177
  @property
178
- def system(self) -> str | None:
178
+ def system(self) -> str:
179
179
  """The system / model provider."""
180
180
  return self._system
181
181
 
@@ -38,7 +38,7 @@ class WrapperModel(Model):
38
38
  return self.wrapped.model_name
39
39
 
40
40
  @property
41
- def system(self) -> str | None:
41
+ def system(self) -> str:
42
42
  return self.wrapped.system
43
43
 
44
44
  def __getattr__(self, item: str):
@@ -69,5 +69,13 @@ def infer_provider(provider: str) -> Provider[Any]:
69
69
  from .groq import GroqProvider
70
70
 
71
71
  return GroqProvider()
72
+ elif provider == 'anthropic':
73
+ from .anthropic import AnthropicProvider
74
+
75
+ return AnthropicProvider()
76
+ elif provider == 'mistral':
77
+ from .mistral import MistralProvider
78
+
79
+ return MistralProvider()
72
80
  else: # pragma: no cover
73
81
  raise ValueError(f'Unknown provider: {provider}')
@@ -0,0 +1,74 @@
1
+ from __future__ import annotations as _annotations
2
+
3
+ import os
4
+ from typing import overload
5
+
6
+ import httpx
7
+
8
+ from pydantic_ai.models import cached_async_http_client
9
+
10
+ try:
11
+ from anthropic import AsyncAnthropic
12
+ except ImportError as _import_error: # pragma: no cover
13
+ raise ImportError(
14
+ 'Please install the `anthropic` package to use the Anthropic provider, '
15
+ "you can use the `anthropic` optional group — `pip install 'pydantic-ai-slim[anthropic]'`"
16
+ ) from _import_error
17
+
18
+
19
+ from . import Provider
20
+
21
+
22
+ class AnthropicProvider(Provider[AsyncAnthropic]):
23
+ """Provider for Anthropic API."""
24
+
25
+ @property
26
+ def name(self) -> str:
27
+ return 'anthropic'
28
+
29
+ @property
30
+ def base_url(self) -> str:
31
+ return str(self._client.base_url)
32
+
33
+ @property
34
+ def client(self) -> AsyncAnthropic:
35
+ return self._client
36
+
37
+ @overload
38
+ def __init__(self, *, anthropic_client: AsyncAnthropic | None = None) -> None: ...
39
+
40
+ @overload
41
+ def __init__(self, *, api_key: str | None = None, http_client: httpx.AsyncClient | None = None) -> None: ...
42
+
43
+ def __init__(
44
+ self,
45
+ *,
46
+ api_key: str | None = None,
47
+ anthropic_client: AsyncAnthropic | None = None,
48
+ http_client: httpx.AsyncClient | None = None,
49
+ ) -> None:
50
+ """Create a new Anthropic provider.
51
+
52
+ Args:
53
+ api_key: The API key to use for authentication, if not provided, the `ANTHROPIC_API_KEY` environment variable
54
+ will be used if available.
55
+ anthropic_client: An existing [`AsyncAnthropic`](https://github.com/anthropics/anthropic-sdk-python)
56
+ client to use. If provided, the `api_key` and `http_client` arguments will be ignored.
57
+ http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
58
+ """
59
+ if anthropic_client is not None:
60
+ assert http_client is None, 'Cannot provide both `anthropic_client` and `http_client`'
61
+ assert api_key is None, 'Cannot provide both `anthropic_client` and `api_key`'
62
+ self._client = anthropic_client
63
+ else:
64
+ api_key = api_key or os.environ.get('ANTHROPIC_API_KEY')
65
+ if api_key is None:
66
+ raise ValueError(
67
+ 'Set the `ANTHROPIC_API_KEY` environment variable or pass it via `AnthropicProvider(api_key=...)`'
68
+ 'to use the Anthropic provider.'
69
+ )
70
+
71
+ if http_client is not None:
72
+ self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
73
+ else:
74
+ self._client = AsyncAnthropic(api_key=api_key, http_client=cached_async_http_client())
@@ -0,0 +1,108 @@
1
+ from __future__ import annotations as _annotations
2
+
3
+ import os
4
+ from typing import overload
5
+
6
+ import httpx
7
+ from openai import AsyncOpenAI
8
+
9
+ from pydantic_ai.models import cached_async_http_client
10
+
11
+ try:
12
+ from openai import AsyncAzureOpenAI
13
+ except ImportError as _import_error: # pragma: no cover
14
+ raise ImportError(
15
+ 'Please install the `openai` package to use the Azure provider, '
16
+ "you can use the `openai` optional group — `pip install 'pydantic-ai-slim[openai]'`"
17
+ ) from _import_error
18
+
19
+
20
+ from . import Provider
21
+
22
+
23
class AzureProvider(Provider[AsyncOpenAI]):
    """Provider for Azure OpenAI API.

    See <https://azure.microsoft.com/en-us/products/ai-foundry> for more information.
    """

    @property
    def name(self) -> str:
        return 'azure'

    @property
    def base_url(self) -> str:
        # Set in both `__init__` branches, so it is always a string by the time this is read.
        assert self._base_url is not None
        return self._base_url

    @property
    def client(self) -> AsyncOpenAI:
        return self._client

    @overload
    def __init__(self, *, openai_client: AsyncAzureOpenAI) -> None: ...

    @overload
    def __init__(
        self,
        *,
        azure_endpoint: str | None = None,
        api_version: str | None = None,
        api_key: str | None = None,
        http_client: httpx.AsyncClient | None = None,
    ) -> None: ...

    def __init__(
        self,
        *,
        azure_endpoint: str | None = None,
        api_version: str | None = None,
        api_key: str | None = None,
        openai_client: AsyncAzureOpenAI | None = None,
        http_client: httpx.AsyncClient | None = None,
    ) -> None:
        """Create a new Azure provider.

        Args:
            azure_endpoint: The Azure endpoint to use for authentication, if not provided, the `AZURE_OPENAI_ENDPOINT`
                environment variable will be used if available.
            api_version: The API version to use for authentication, if not provided, the `OPENAI_API_VERSION`
                environment variable will be used if available.
            api_key: The API key to use for authentication, if not provided, the `AZURE_OPENAI_API_KEY` environment variable
                will be used if available.
            openai_client: An existing
                [`AsyncAzureOpenAI`](https://github.com/openai/openai-python#microsoft-azure-openai)
                client to use. If provided, `azure_endpoint`, `api_key`, and `http_client` must be `None`.
            http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.

        Raises:
            ValueError: If the endpoint, API key, or API version is not supplied either directly or via
                the corresponding environment variable.
        """
        if openai_client is not None:
            assert azure_endpoint is None, 'Cannot provide both `openai_client` and `azure_endpoint`'
            assert http_client is None, 'Cannot provide both `openai_client` and `http_client`'
            assert api_key is None, 'Cannot provide both `openai_client` and `api_key`'
            self._base_url = str(openai_client.base_url)
            self._client = openai_client
        else:
            azure_endpoint = azure_endpoint or os.getenv('AZURE_OPENAI_ENDPOINT')
            if azure_endpoint is None:  # pragma: no cover
                raise ValueError(
                    'Must provide one of the `azure_endpoint` argument or the `AZURE_OPENAI_ENDPOINT` environment variable'
                )

            # The AsyncAzureOpenAI client reads `AZURE_OPENAI_API_KEY` (not `OPENAI_API_KEY`)
            # when `api_key` is None, so that is the variable to check here.
            if api_key is None and 'AZURE_OPENAI_API_KEY' not in os.environ:  # pragma: no cover
                raise ValueError(
                    'Must provide one of the `api_key` argument or the `AZURE_OPENAI_API_KEY` environment variable'
                )

            if api_version is None and 'OPENAI_API_VERSION' not in os.environ:  # pragma: no cover
                raise ValueError(
                    'Must provide one of the `api_version` argument or the `OPENAI_API_VERSION` environment variable'
                )

            http_client = http_client or cached_async_http_client()
            self._client = AsyncAzureOpenAI(
                azure_endpoint=azure_endpoint,
                api_key=api_key,
                api_version=api_version,
                http_client=http_client,
            )
            self._base_url = str(self._client.base_url)
@@ -10,7 +10,7 @@ try:
10
10
  from botocore.exceptions import NoRegionError
11
11
  except ImportError as _import_error:
12
12
  raise ImportError(
13
- 'Please install `boto3` to use the Bedrock provider, '
13
+ 'Please install the `boto3` package to use the Bedrock provider, '
14
14
  "you can use the `bedrock` optional group — `pip install 'pydantic-ai-slim[bedrock]'`"
15
15
  ) from _import_error
16
16
 
@@ -12,7 +12,7 @@ try:
12
12
  from openai import AsyncOpenAI
13
13
  except ImportError as _import_error: # pragma: no cover
14
14
  raise ImportError(
15
- 'Please install `openai` to use the DeepSeek provider, '
15
+ 'Please install the `openai` package to use the DeepSeek provider, '
16
16
  "you can use the `openai` optional group — `pip install 'pydantic-ai-slim[openai]'`"
17
17
  ) from _import_error
18
18
 
@@ -21,7 +21,7 @@ try:
21
21
  from google.oauth2.service_account import Credentials as ServiceAccountCredentials
22
22
  except ImportError as _import_error:
23
23
  raise ImportError(
24
- 'Please install `google-auth` to use the Google Vertex AI provider, '
24
+ 'Please install the `google-auth` package to use the Google Vertex AI provider, '
25
25
  "you can use the `vertexai` optional group — `pip install 'pydantic-ai-slim[vertexai]'`"
26
26
  ) from _import_error
27
27
 
@@ -11,7 +11,7 @@ try:
11
11
  from groq import AsyncGroq
12
12
  except ImportError as _import_error: # pragma: no cover
13
13
  raise ImportError(
14
- 'Please install `groq` to use the Groq provider, '
14
+ 'Please install the `groq` package to use the Groq provider, '
15
15
  "you can use the `groq` optional group — `pip install 'pydantic-ai-slim[groq]'`"
16
16
  ) from _import_error
17
17
 
@@ -66,8 +66,6 @@ class GroqProvider(Provider[AsyncGroq]):
66
66
  )
67
67
 
68
68
  if groq_client is not None:
69
- assert http_client is None, 'Cannot provide both `groq_client` and `http_client`'
70
- assert api_key is None, 'Cannot provide both `groq_client` and `api_key`'
71
69
  self._client = groq_client
72
70
  elif http_client is not None:
73
71
  self._client = AsyncGroq(base_url=self.base_url, api_key=api_key, http_client=http_client)
@@ -0,0 +1,73 @@
1
+ from __future__ import annotations as _annotations
2
+
3
+ import os
4
+ from typing import overload
5
+
6
+ from httpx import AsyncClient as AsyncHTTPClient
7
+
8
+ from pydantic_ai.models import cached_async_http_client
9
+
10
+ try:
11
+ from mistralai import Mistral
12
+ except ImportError as e: # pragma: no cover
13
+ raise ImportError(
14
+ 'Please install the `mistral` package to use the Mistral provider, '
15
+ "you can use the `mistral` optional group — `pip install 'pydantic-ai-slim[mistral]'`"
16
+ ) from e
17
+
18
+
19
+ from . import Provider
20
+
21
+
22
class MistralProvider(Provider[Mistral]):
    """Provider for Mistral API."""

    @property
    def name(self) -> str:
        return 'mistral'

    @property
    def base_url(self) -> str:
        # The Mistral SDK exposes the configured server URL via its SDK configuration;
        # `get_server_details()` returns a `(url, variables)` tuple.
        return self.client.sdk_configuration.get_server_details()[0]

    @property
    def client(self) -> Mistral:
        return self._client

    @overload
    def __init__(self, *, mistral_client: Mistral | None = None) -> None: ...

    @overload
    def __init__(self, *, api_key: str | None = None, http_client: AsyncHTTPClient | None = None) -> None: ...

    def __init__(
        self,
        *,
        api_key: str | None = None,
        mistral_client: Mistral | None = None,
        http_client: AsyncHTTPClient | None = None,
    ) -> None:
        """Create a new Mistral provider.

        Args:
            api_key: The API key to use for authentication, if not provided, the `MISTRAL_API_KEY` environment variable
                will be used if available.
            mistral_client: An existing `Mistral` client to use, if provided, `api_key` and `http_client` must be `None`.
            http_client: An existing async client to use for making HTTP requests.

        Raises:
            ValueError: If neither an API key nor an existing client is available.
        """
        api_key = api_key or os.environ.get('MISTRAL_API_KEY')

        if api_key is None and mistral_client is None:
            # NOTE: trailing space before the implicit concatenation keeps the
            # two literals from running together in the rendered message.
            raise ValueError(
                'Set the `MISTRAL_API_KEY` environment variable or pass it via `MistralProvider(api_key=...)` '
                'to use the Mistral provider.'
            )

        if mistral_client is not None:
            assert http_client is None, 'Cannot provide both `mistral_client` and `http_client`'
            assert api_key is None, 'Cannot provide both `mistral_client` and `api_key`'
            self._client = mistral_client
        elif http_client is not None:
            self._client = Mistral(api_key=api_key, async_client=http_client)
        else:
            # Fall back to the shared cached HTTP client to avoid creating one per provider.
            self._client = Mistral(api_key=api_key, async_client=cached_async_http_client())
@@ -1,7 +1,6 @@
1
1
  from __future__ import annotations as _annotations
2
2
 
3
3
  import os
4
- from typing import TypeVar
5
4
 
6
5
  import httpx
7
6
 
@@ -11,15 +10,13 @@ try:
11
10
  from openai import AsyncOpenAI
12
11
  except ImportError as _import_error: # pragma: no cover
13
12
  raise ImportError(
14
- 'Please install `openai` to use the OpenAI provider, '
13
+ 'Please install the `openai` package to use the OpenAI provider, '
15
14
  "you can use the `openai` optional group — `pip install 'pydantic-ai-slim[openai]'`"
16
15
  ) from _import_error
17
16
 
18
17
 
19
18
  from . import Provider
20
19
 
21
- InterfaceClient = TypeVar('InterfaceClient')
22
-
23
20
 
24
21
  class OpenAIProvider(Provider[AsyncOpenAI]):
25
22
  """Provider for OpenAI API."""
@@ -55,7 +52,7 @@ class OpenAIProvider(Provider[AsyncOpenAI]):
55
52
  client to use. If provided, `base_url`, `api_key`, and `http_client` must be `None`.
56
53
  http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
57
54
  """
58
- self._base_url = base_url or 'https://api.openai.com/v1'
55
+ self._base_url = base_url or os.getenv('OPENAI_BASE_URL', 'https://api.openai.com/v1')
59
56
  # This is a workaround for the OpenAI client requiring an API key, whilst locally served,
60
57
  # openai compatible models do not always need an API key, but a placeholder (non-empty) key is required.
61
58
  if api_key is None and 'OPENAI_API_KEY' not in os.environ and openai_client is None:
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "pydantic-ai-slim"
7
- version = "0.0.39"
7
+ version = "0.0.41"
8
8
  description = "Agent Framework / shim to use Pydantic with LLMs, slim package"
9
9
  authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
10
10
  license = "MIT"
@@ -36,7 +36,7 @@ dependencies = [
36
36
  "griffe>=1.3.2",
37
37
  "httpx>=0.27",
38
38
  "pydantic>=2.10",
39
- "pydantic-graph==0.0.39",
39
+ "pydantic-graph==0.0.41",
40
40
  "exceptiongroup; python_version < '3.11'",
41
41
  "opentelemetry-api>=1.28.0",
42
42
  "typing-inspection>=0.4.0",
@@ -475,8 +475,8 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
475
475
  start_node,
476
476
  state=state,
477
477
  deps=graph_deps,
478
- infer_name=False,
479
478
  span=use_span(run_span, end_on_exit=True),
479
+ infer_name=False,
480
480
  ) as graph_run:
481
481
  yield AgentRun(graph_run)
482
482