pydantic-ai-slim 0.0.38__tar.gz → 0.0.40__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registries, and is provided for informational purposes only.
Note: this release of pydantic-ai-slim has been flagged as potentially problematic.
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/PKG-INFO +2 -2
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/__init__.py +8 -2
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/anthropic.py +13 -10
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/bedrock.py +3 -3
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/cohere.py +2 -2
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/fallback.py +16 -8
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/function.py +2 -2
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/gemini.py +2 -2
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/groq.py +37 -11
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/instrumented.py +2 -8
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/mistral.py +2 -2
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/openai.py +7 -7
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/test.py +2 -2
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/vertexai.py +2 -2
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/models/wrapper.py +1 -1
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/providers/__init__.py +4 -0
- pydantic_ai_slim-0.0.40/pydantic_ai/providers/azure.py +108 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/providers/bedrock.py +1 -1
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/providers/deepseek.py +1 -1
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/providers/google_vertex.py +1 -1
- pydantic_ai_slim-0.0.40/pydantic_ai/providers/groq.py +73 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/providers/openai.py +2 -5
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pyproject.toml +2 -2
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/.gitignore +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/README.md +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/_result.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/agent.py +1 -1
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.0.38 → pydantic_ai_slim-0.0.40}/pydantic_ai/usage.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.38
+Version: 0.0.40
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.0.38
+Requires-Dist: pydantic-graph==0.0.40
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic

pydantic_ai/models/__init__.py
@@ -262,8 +262,14 @@ class Model(ABC):
 
     @property
     @abstractmethod
-    def system(self) -> str | None:
-        """The system / model provider, ex: openai."""
+    def system(self) -> str:
+        """The system / model provider, ex: openai.
+
+        Use to populate the `gen_ai.system` OpenTelemetry semantic convention attribute,
+        so should use well-known values listed in
+        https://opentelemetry.io/docs/specs/semconv/attributes-registry/gen-ai/#gen-ai-system
+        when applicable.
+        """
         raise NotImplementedError()
 
     @property
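
The expanded docstring makes the contract explicit: `Model.system` feeds the `gen_ai.system` OpenTelemetry attribute, so implementations should prefer the registry's well-known values. A minimal sketch of a conforming subclass (the class name and values are illustrative, and the other abstract members of `Model` are omitted):

    from pydantic_ai.models import Model

    class MyProxyModel(Model):
        # request() and friends omitted for brevity; a real subclass must implement them.

        @property
        def system(self) -> str:
            # A well-known value from the gen-ai semantic-conventions registry.
            return 'openai'

        @property
        def model_name(self) -> str:
            return 'my-proxy-model'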

pydantic_ai/models/anthropic.py
@@ -33,13 +33,7 @@ from ..messages import (
 )
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
-from . import (
-    Model,
-    ModelRequestParameters,
-    StreamedResponse,
-    cached_async_http_client,
-    check_allow_model_requests,
-)
+from . import Model, ModelRequestParameters, StreamedResponse, cached_async_http_client, check_allow_model_requests
 
 try:
     from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropic, AsyncStream
@@ -115,7 +109,7 @@ class AnthropicModel(Model):
     client: AsyncAnthropic = field(repr=False)
 
     _model_name: AnthropicModelName = field(repr=False)
-    _system: str | None = field(default='anthropic', repr=False)
+    _system: str = field(default='anthropic', repr=False)
 
     def __init__(
         self,
@@ -183,7 +177,7 @@ class AnthropicModel(Model):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider."""
         return self._system
 
@@ -355,8 +349,17 @@ class AnthropicModel(Model):
                     source={'data': io.BytesIO(item.data), 'media_type': item.media_type, 'type': 'base64'},  # type: ignore
                     type='image',
                 )
+            elif item.media_type == 'application/pdf':
+                yield DocumentBlockParam(
+                    source=Base64PDFSourceParam(
+                        data=io.BytesIO(item.data),
+                        media_type='application/pdf',
+                        type='base64',
+                    ),
+                    type='document',
+                )
             else:
-                raise RuntimeError('Only images are supported for binary content')
+                raise RuntimeError('Only images and PDFs are supported for binary content')
         elif isinstance(item, ImageUrl):
             try:
                 response = await cached_async_http_client().get(item.url)
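
With the new `DocumentBlockParam` branch, requests to Anthropic models can carry PDFs supplied as binary content. A usage sketch (the model name and file path are illustrative):

    from pydantic_ai import Agent
    from pydantic_ai.messages import BinaryContent

    agent = Agent('anthropic:claude-3-5-sonnet-latest')
    pdf_bytes = open('report.pdf', 'rb').read()  # hypothetical local file
    result = agent.run_sync(
        ['Summarize this document.', BinaryContent(data=pdf_bytes, media_type='application/pdf')]
    )
    print(result.data)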

pydantic_ai/models/bedrock.py
@@ -10,7 +10,6 @@ from typing import TYPE_CHECKING, Generic, Literal, Union, cast, overload
 
 import anyio
 import anyio.to_thread
-from mypy_boto3_bedrock_runtime.type_defs import ImageBlockTypeDef
 from typing_extensions import ParamSpec, assert_never
 
 from pydantic_ai import _utils, result
@@ -46,6 +45,7 @@ if TYPE_CHECKING:
         ConverseResponseTypeDef,
         ConverseStreamMetadataEventTypeDef,
         ConverseStreamOutputTypeDef,
+        ImageBlockTypeDef,
         InferenceConfigurationTypeDef,
         MessageUnionTypeDef,
         ToolChoiceTypeDef,
@@ -119,7 +119,7 @@ class BedrockConverseModel(Model):
     client: BedrockRuntimeClient
 
     _model_name: BedrockModelName = field(repr=False)
-    _system: str | None = field(default='bedrock', repr=False)
+    _system: str = field(default='bedrock', repr=False)
 
     @property
     def model_name(self) -> str:
@@ -127,7 +127,7 @@ class BedrockConverseModel(Model):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider, ex: openai."""
         return self._system
 

pydantic_ai/models/cohere.py
@@ -98,7 +98,7 @@ class CohereModel(Model):
     client: AsyncClientV2 = field(repr=False)
 
     _model_name: CohereModelName = field(repr=False)
-    _system: str | None = field(default='cohere', repr=False)
+    _system: str = field(default='cohere', repr=False)
 
     def __init__(
         self,
@@ -148,7 +148,7 @@ class CohereModel(Model):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider."""
         return self._system
 

pydantic_ai/models/fallback.py
@@ -1,10 +1,14 @@
 from __future__ import annotations as _annotations
 
 from collections.abc import AsyncIterator
-from contextlib import AsyncExitStack, asynccontextmanager
+from contextlib import AsyncExitStack, asynccontextmanager, suppress
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Callable
 
+from opentelemetry.trace import get_current_span
+
+from pydantic_ai.models.instrumented import InstrumentedModel
+
 from ..exceptions import FallbackExceptionGroup, ModelHTTPError
 from . import KnownModelName, Model, ModelRequestParameters, StreamedResponse, infer_model
 
@@ -40,7 +44,6 @@ class FallbackModel(Model):
             fallback_on: A callable or tuple of exceptions that should trigger a fallback.
         """
         self.models = [infer_model(default_model), *[infer_model(m) for m in fallback_models]]
-        self._model_name = f'FallBackModel[{", ".join(model.model_name for model in self.models)}]'
 
         if isinstance(fallback_on, tuple):
            self._fallback_on = _default_fallback_condition_factory(fallback_on)
@@ -62,13 +65,19 @@ class FallbackModel(Model):
         for model in self.models:
             try:
                 response, usage = await model.request(messages, model_settings, model_request_parameters)
-                response.model_used = model  # type: ignore
-                return response, usage
             except Exception as exc:
                 if self._fallback_on(exc):
                     exceptions.append(exc)
                     continue
                 raise exc
+            else:
+                with suppress(Exception):
+                    span = get_current_span()
+                    if span.is_recording():
+                        attributes = getattr(span, 'attributes', {})
+                        if attributes.get('gen_ai.request.model') == self.model_name:
+                            span.set_attributes(InstrumentedModel.model_attributes(model))
+                return response, usage
 
         raise FallbackExceptionGroup('All models from FallbackModel failed', exceptions)
 
@@ -101,12 +110,11 @@ class FallbackModel(Model):
     @property
     def model_name(self) -> str:
         """The model name."""
-        return self._model_name
+        return f'fallback:{",".join(model.model_name for model in self.models)}'
 
     @property
-    def system(self) -> str | None:
-        """The system / model provider."""
-        return None
+    def system(self) -> str:
+        return f'fallback:{",".join(model.system for model in self.models)}'
 
     @property
     def base_url(self) -> str | None:
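
Both identifiers now follow a `fallback:`-prefixed, comma-separated scheme derived from the wrapped models, instead of the old `FallBackModel[...]` string. A quick sketch of what they evaluate to (assuming the openai and anthropic extras are installed):

    from pydantic_ai.models.fallback import FallbackModel

    model = FallbackModel('openai:gpt-4o', 'anthropic:claude-3-5-sonnet-latest')
    print(model.model_name)  # fallback:gpt-4o,claude-3-5-sonnet-latest
    print(model.system)      # fallback:openai,anthropic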

pydantic_ai/models/function.py
@@ -45,7 +45,7 @@ class FunctionModel(Model):
     stream_function: StreamFunctionDef | None = None
 
     _model_name: str = field(repr=False)
-    _system: str | None = field(default='function', repr=False)
+    _system: str = field(default='function', repr=False)
 
     @overload
     def __init__(self, function: FunctionDef, *, model_name: str | None = None) -> None: ...
@@ -140,7 +140,7 @@ class FunctionModel(Model):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider."""
         return self._system
 

pydantic_ai/models/gemini.py
@@ -91,7 +91,7 @@ class GeminiModel(Model):
     _provider: Literal['google-gla', 'google-vertex'] | Provider[AsyncHTTPClient] | None = field(repr=False)
     _auth: AuthProtocol | None = field(repr=False)
     _url: str | None = field(repr=False)
-    _system: str | None = field(default='google-gla', repr=False)
+    _system: str = field(default='gemini', repr=False)
 
     @overload
     def __init__(
@@ -197,7 +197,7 @@ class GeminiModel(Model):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider."""
         return self._system
 

pydantic_ai/models/groq.py
@@ -9,7 +9,7 @@ from itertools import chain
 from typing import Literal, Union, cast, overload
 
 from httpx import AsyncClient as AsyncHTTPClient
-from typing_extensions import assert_never
+from typing_extensions import assert_never, deprecated
 
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
 from .._utils import guard_tool_call_id as _guard_tool_call_id
@@ -29,15 +29,10 @@ from ..messages import (
     ToolReturnPart,
     UserPromptPart,
 )
+from ..providers import Provider, infer_provider
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
-from . import (
-    Model,
-    ModelRequestParameters,
-    StreamedResponse,
-    cached_async_http_client,
-    check_allow_model_requests,
-)
+from . import Model, ModelRequestParameters, StreamedResponse, cached_async_http_client, check_allow_model_requests
 
 try:
     from groq import NOT_GIVEN, APIStatusError, AsyncGroq, AsyncStream
@@ -49,6 +44,7 @@ except ImportError as _import_error:
         "you can use the `groq` optional group — `pip install 'pydantic-ai-slim[groq]'`"
     ) from _import_error
 
+
 LatestGroqModelNames = Literal[
     'llama-3.3-70b-versatile',
     'llama-3.3-70b-specdec',
@@ -92,12 +88,33 @@ class GroqModel(Model):
     client: AsyncGroq = field(repr=False)
 
     _model_name: GroqModelName = field(repr=False)
-    _system: str | None = field(default='groq', repr=False)
+    _system: str = field(default='groq', repr=False)
+
+    @overload
+    def __init__(
+        self,
+        model_name: GroqModelName,
+        *,
+        provider: Literal['groq'] | Provider[AsyncGroq] = 'groq',
+    ) -> None: ...
+
+    @deprecated('Use the `provider` parameter instead of `api_key`, `groq_client`, and `http_client`.')
+    @overload
+    def __init__(
+        self,
+        model_name: GroqModelName,
+        *,
+        provider: None = None,
+        api_key: str | None = None,
+        groq_client: AsyncGroq | None = None,
+        http_client: AsyncHTTPClient | None = None,
+    ) -> None: ...
 
     def __init__(
         self,
         model_name: GroqModelName,
         *,
+        provider: Literal['groq'] | Provider[AsyncGroq] | None = None,
         api_key: str | None = None,
         groq_client: AsyncGroq | None = None,
         http_client: AsyncHTTPClient | None = None,
@@ -107,6 +124,9 @@ class GroqModel(Model):
         Args:
             model_name: The name of the Groq model to use. List of model names available
                 [here](https://console.groq.com/docs/models).
+            provider: The provider to use for authentication and API access. Can be either the string
+                'groq' or an instance of `Provider[AsyncGroq]`. If not provided, a new provider will be
+                created using the other parameters.
             api_key: The API key to use for authentication, if not provided, the `GROQ_API_KEY` environment variable
                 will be used if available.
             groq_client: An existing
@@ -115,7 +135,13 @@ class GroqModel(Model):
             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
         """
         self._model_name = model_name
-        if groq_client is not None:
+
+        if provider is not None:
+            if isinstance(provider, str):
+                self.client = infer_provider(provider).client
+            else:
+                self.client = provider.client
+        elif groq_client is not None:
             assert http_client is None, 'Cannot provide both `groq_client` and `http_client`'
             assert api_key is None, 'Cannot provide both `groq_client` and `api_key`'
             self.client = groq_client
@@ -160,7 +186,7 @@ class GroqModel(Model):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider."""
         return self._system
 
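
The net effect: the legacy keyword arguments keep working but are now flagged with `@deprecated`, while `provider` becomes the supported path. Both calls below build an equivalent model; the first passes a placeholder key directly, the second reads `GROQ_API_KEY` via the inferred provider:

    from pydantic_ai.models.groq import GroqModel

    model = GroqModel('llama-3.3-70b-versatile', api_key='gsk-...')  # deprecated keyword path
    model = GroqModel('llama-3.3-70b-versatile', provider='groq')    # new provider path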

pydantic_ai/models/instrumented.py
@@ -175,11 +175,7 @@ class InstrumentedModel(WrapperModel):
                        )
                    )
                new_attributes: dict[str, AttributeValue] = usage.opentelemetry_attributes()  # type: ignore
-
-                # FallbackModel sets model_used on the response so that we can report the attributes
-                # of the model that was actually used.
-                new_attributes.update(self.model_attributes(model_used))
-                attributes.update(new_attributes)
+                attributes.update(getattr(span, 'attributes', {}))
                request_model = attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]
                new_attributes['gen_ai.response.model'] = response.model_name or request_model
                span.set_attributes(new_attributes)
@@ -213,10 +209,8 @@ class InstrumentedModel(WrapperModel):
 
     @staticmethod
     def model_attributes(model: Model):
-        system = getattr(model, 'system', '') or model.__class__.__name__.removesuffix('Model').lower()
-        system = {'google-gla': 'gemini', 'google-vertex': 'vertex_ai', 'mistral': 'mistral_ai'}.get(system, system)
         attributes: dict[str, AttributeValue] = {
-            GEN_AI_SYSTEM_ATTRIBUTE: system,
+            GEN_AI_SYSTEM_ATTRIBUTE: model.system,
             GEN_AI_REQUEST_MODEL_ATTRIBUTE: model.model_name,
         }
         if base_url := model.base_url:
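
`model_attributes` now trusts `Model.system` directly rather than guessing from the class name, which works because every model now carries a canonical system value. A sketch of the result for a Groq model:

    from pydantic_ai.models.groq import GroqModel
    from pydantic_ai.models.instrumented import InstrumentedModel

    attrs = InstrumentedModel.model_attributes(GroqModel('llama-3.3-70b-versatile'))
    # e.g. {'gen_ai.system': 'groq', 'gen_ai.request.model': 'llama-3.3-70b-versatile', ...}
    # plus address attributes derived from the model's base_url, when set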

pydantic_ai/models/mistral.py
@@ -110,7 +110,7 @@ class MistralModel(Model):
     json_mode_schema_prompt: str = """Answer in JSON Object, respect the format:\n```\n{schema}\n```\n"""
 
     _model_name: MistralModelName = field(repr=False)
-    _system: str | None = field(default='mistral', repr=False)
+    _system: str = field(default='mistral_ai', repr=False)
 
     def __init__(
         self,
@@ -179,7 +179,7 @@ class MistralModel(Model):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider."""
         return self._system
 

pydantic_ai/models/openai.py
@@ -99,16 +99,16 @@ class OpenAIModel(Model):
     system_prompt_role: OpenAISystemPromptRole | None = field(default=None)
 
     _model_name: OpenAIModelName = field(repr=False)
-    _system: str | None = field(repr=False)
+    _system: str = field(repr=False)
 
     @overload
     def __init__(
         self,
         model_name: OpenAIModelName,
         *,
-        provider: Literal['openai', 'deepseek'] | Provider[AsyncOpenAI] = 'openai',
+        provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] = 'openai',
         system_prompt_role: OpenAISystemPromptRole | None = None,
-        system: str | None = 'openai',
+        system: str = 'openai',
     ) -> None: ...
 
     @deprecated('Use the `provider` parameter instead of `base_url`, `api_key`, `openai_client` and `http_client`.')
@@ -123,20 +123,20 @@ class OpenAIModel(Model):
         openai_client: AsyncOpenAI | None = None,
         http_client: AsyncHTTPClient | None = None,
         system_prompt_role: OpenAISystemPromptRole | None = None,
-        system: str | None = 'openai',
+        system: str = 'openai',
     ) -> None: ...
 
     def __init__(
         self,
         model_name: OpenAIModelName,
         *,
-        provider: Literal['openai', 'deepseek'] | Provider[AsyncOpenAI] | None = None,
+        provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] | None = None,
         base_url: str | None = None,
         api_key: str | None = None,
         openai_client: AsyncOpenAI | None = None,
         http_client: AsyncHTTPClient | None = None,
         system_prompt_role: OpenAISystemPromptRole | None = None,
-        system: str | None = 'openai',
+        system: str = 'openai',
     ):
         """Initialize an OpenAI model.
 
@@ -224,7 +224,7 @@ class OpenAIModel(Model):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider."""
         return self._system
 
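
With `'azure'` added to the `Literal`, an Azure-hosted deployment can be selected by provider name, presumably resolved the same way as the other provider strings. A sketch, assuming the `AZURE_OPENAI_ENDPOINT`, `OPENAI_API_VERSION` and API-key environment variables are set:

    from pydantic_ai.models.openai import OpenAIModel

    model = OpenAIModel('gpt-4o', provider='azure')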

pydantic_ai/models/test.py
@@ -79,7 +79,7 @@ class TestModel(Model):
     This is set when a request is made, so will reflect the function tools from the last step of the last run.
     """
     _model_name: str = field(default='test', repr=False)
-    _system: str | None = field(default='test', repr=False)
+    _system: str = field(default='test', repr=False)
 
     async def request(
         self,
@@ -113,7 +113,7 @@ class TestModel(Model):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider."""
         return self._system
 

pydantic_ai/models/vertexai.py
@@ -69,7 +69,7 @@ class VertexAIModel(GeminiModel):
     url_template: str
 
     _model_name: GeminiModelName = field(repr=False)
-    _system: str | None = field(default='google-vertex', repr=False)
+    _system: str = field(default='vertex_ai', repr=False)
 
     # TODO __init__ can be removed once we drop 3.9 and we can set kw_only correctly on the dataclass
     def __init__(
@@ -175,7 +175,7 @@ class VertexAIModel(GeminiModel):
         return self._model_name
 
     @property
-    def system(self) -> str | None:
+    def system(self) -> str:
         """The system / model provider."""
         return self._system
 

pydantic_ai/providers/__init__.py
@@ -65,5 +65,9 @@ def infer_provider(provider: str) -> Provider[Any]:
         from .bedrock import BedrockProvider
 
         return BedrockProvider()
+    elif provider == 'groq':
+        from .groq import GroqProvider
+
+        return GroqProvider()
     else:  # pragma: no cover
         raise ValueError(f'Unknown provider: {provider}')
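
A quick check of the new branch (requires the groq extra and a `GROQ_API_KEY`):

    from pydantic_ai.providers import infer_provider

    provider = infer_provider('groq')
    print(provider.name)      # groq
    print(provider.base_url)  # https://api.groq.com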

pydantic_ai/providers/azure.py (new file)
@@ -0,0 +1,108 @@
+from __future__ import annotations as _annotations
+
+import os
+from typing import overload
+
+import httpx
+from openai import AsyncOpenAI
+
+from pydantic_ai.models import cached_async_http_client
+
+try:
+    from openai import AsyncAzureOpenAI
+except ImportError as _import_error:  # pragma: no cover
+    raise ImportError(
+        'Please install the `openai` package to use the Azure provider, '
+        "you can use the `openai` optional group — `pip install 'pydantic-ai-slim[openai]'`"
+    ) from _import_error
+
+
+from . import Provider
+
+
+class AzureProvider(Provider[AsyncOpenAI]):
+    """Provider for Azure OpenAI API.
+
+    See <https://azure.microsoft.com/en-us/products/ai-foundry> for more information.
+    """
+
+    @property
+    def name(self) -> str:
+        return 'azure'
+
+    @property
+    def base_url(self) -> str:
+        assert self._base_url is not None
+        return self._base_url
+
+    @property
+    def client(self) -> AsyncOpenAI:
+        return self._client
+
+    @overload
+    def __init__(self, *, openai_client: AsyncAzureOpenAI) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        *,
+        azure_endpoint: str | None = None,
+        api_version: str | None = None,
+        api_key: str | None = None,
+        http_client: httpx.AsyncClient | None = None,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        azure_endpoint: str | None = None,
+        api_version: str | None = None,
+        api_key: str | None = None,
+        openai_client: AsyncAzureOpenAI | None = None,
+        http_client: httpx.AsyncClient | None = None,
+    ) -> None:
+        """Create a new Azure provider.
+
+        Args:
+            azure_endpoint: The Azure endpoint to use for authentication, if not provided, the `AZURE_OPENAI_ENDPOINT`
+                environment variable will be used if available.
+            api_version: The API version to use for authentication, if not provided, the `OPENAI_API_VERSION`
+                environment variable will be used if available.
+            api_key: The API key to use for authentication, if not provided, the `AZURE_OPENAI_API_KEY` environment variable
+                will be used if available.
+            openai_client: An existing
+                [`AsyncAzureOpenAI`](https://github.com/openai/openai-python#microsoft-azure-openai)
+                client to use. If provided, `base_url`, `api_key`, and `http_client` must be `None`.
+            http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
+        """
+        if openai_client is not None:
+            assert azure_endpoint is None, 'Cannot provide both `openai_client` and `azure_endpoint`'
+            assert http_client is None, 'Cannot provide both `openai_client` and `http_client`'
+            assert api_key is None, 'Cannot provide both `openai_client` and `api_key`'
+            self._base_url = str(openai_client.base_url)
+            self._client = openai_client
+        else:
+            azure_endpoint = azure_endpoint or os.getenv('AZURE_OPENAI_ENDPOINT')
+            if azure_endpoint is None:  # pragma: no cover
+                raise ValueError(
+                    'Must provide one of the `azure_endpoint` argument or the `AZURE_OPENAI_ENDPOINT` environment variable'
+                )
+
+            if api_key is None and 'OPENAI_API_KEY' not in os.environ:  # pragma: no cover
+                raise ValueError(
+                    'Must provide one of the `api_key` argument or the `OPENAI_API_KEY` environment variable'
+                )
+
+            if api_version is None and 'OPENAI_API_VERSION' not in os.environ:  # pragma: no cover
+                raise ValueError(
+                    'Must provide one of the `api_version` argument or the `OPENAI_API_VERSION` environment variable'
+                )
+
+            http_client = http_client or cached_async_http_client()
+            self._client = AsyncAzureOpenAI(
+                azure_endpoint=azure_endpoint,
+                api_key=api_key,
+                api_version=api_version,
+                http_client=http_client,
+            )
+            self._base_url = str(self._client.base_url)
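
Construction mirrors `AsyncAzureOpenAI` itself: pass the endpoint, version and key explicitly, rely on the environment variables, or hand over a pre-built client. A sketch with explicit, illustrative values:

    from pydantic_ai.models.openai import OpenAIModel
    from pydantic_ai.providers.azure import AzureProvider

    provider = AzureProvider(
        azure_endpoint='https://my-resource.openai.azure.com',  # hypothetical endpoint
        api_version='2024-06-01',
        api_key='...',
    )
    model = OpenAIModel('gpt-4o', provider=provider)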

pydantic_ai/providers/bedrock.py
@@ -10,7 +10,7 @@ try:
     from botocore.exceptions import NoRegionError
 except ImportError as _import_error:
     raise ImportError(
-        'Please install `boto3` to use the Bedrock provider, '
+        'Please install the `boto3` package to use the Bedrock provider, '
         "you can use the `bedrock` optional group — `pip install 'pydantic-ai-slim[bedrock]'`"
     ) from _import_error
 

pydantic_ai/providers/deepseek.py
@@ -12,7 +12,7 @@ try:
     from openai import AsyncOpenAI
 except ImportError as _import_error:  # pragma: no cover
     raise ImportError(
-        'Please install `openai` to use the DeepSeek provider, '
+        'Please install the `openai` package to use the DeepSeek provider, '
         "you can use the `openai` optional group — `pip install 'pydantic-ai-slim[openai]'`"
     ) from _import_error
 

pydantic_ai/providers/google_vertex.py
@@ -21,7 +21,7 @@ try:
     from google.oauth2.service_account import Credentials as ServiceAccountCredentials
 except ImportError as _import_error:
     raise ImportError(
-        'Please install `google-auth` to use the Google Vertex AI provider, '
+        'Please install the `google-auth` package to use the Google Vertex AI provider, '
         "you can use the `vertexai` optional group — `pip install 'pydantic-ai-slim[vertexai]'`"
     ) from _import_error
 

pydantic_ai/providers/groq.py (new file)
@@ -0,0 +1,73 @@
+from __future__ import annotations as _annotations
+
+import os
+from typing import overload
+
+from httpx import AsyncClient as AsyncHTTPClient
+
+from pydantic_ai.models import cached_async_http_client
+
+try:
+    from groq import AsyncGroq
+except ImportError as _import_error:  # pragma: no cover
+    raise ImportError(
+        'Please install the `groq` package to use the Groq provider, '
+        "you can use the `groq` optional group — `pip install 'pydantic-ai-slim[groq]'`"
+    ) from _import_error
+
+
+from . import Provider
+
+
+class GroqProvider(Provider[AsyncGroq]):
+    """Provider for Groq API."""
+
+    @property
+    def name(self) -> str:
+        return 'groq'
+
+    @property
+    def base_url(self) -> str:
+        return os.environ.get('GROQ_BASE_URL', 'https://api.groq.com')
+
+    @property
+    def client(self) -> AsyncGroq:
+        return self._client
+
+    @overload
+    def __init__(self, *, groq_client: AsyncGroq | None = None) -> None: ...
+
+    @overload
+    def __init__(self, *, api_key: str | None = None, http_client: AsyncHTTPClient | None = None) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        groq_client: AsyncGroq | None = None,
+        http_client: AsyncHTTPClient | None = None,
+    ) -> None:
+        """Create a new Groq provider.
+
+        Args:
+            api_key: The API key to use for authentication, if not provided, the `GROQ_API_KEY` environment variable
+                will be used if available.
+            groq_client: An existing
+                [`AsyncGroq`](https://github.com/groq/groq-python?tab=readme-ov-file#async-usage)
+                client to use. If provided, `api_key` and `http_client` must be `None`.
+            http_client: An existing `AsyncHTTPClient` to use for making HTTP requests.
+        """
+        api_key = api_key or os.environ.get('GROQ_API_KEY')
+
+        if api_key is None and groq_client is None:
+            raise ValueError(
+                'Set the `GROQ_API_KEY` environment variable or pass it via `GroqProvider(api_key=...)`'
+                'to use the Groq provider.'
+            )
+
+        if groq_client is not None:
+            self._client = groq_client
+        elif http_client is not None:
+            self._client = AsyncGroq(base_url=self.base_url, api_key=api_key, http_client=http_client)
+        else:
+            self._client = AsyncGroq(base_url=self.base_url, api_key=api_key, http_client=cached_async_http_client())
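
The constructor resolves a client in order of precedence: an existing `AsyncGroq` client, an explicit `httpx` client, or the shared cached HTTP client. A sketch of the three paths (key values are placeholders):

    import httpx
    from pydantic_ai.providers.groq import GroqProvider

    p1 = GroqProvider(api_key='gsk-...')  # falls back to the cached shared HTTP client
    p2 = GroqProvider(api_key='gsk-...', http_client=httpx.AsyncClient(timeout=30))
    # p3 = GroqProvider(groq_client=my_async_groq_client)  # bring your own AsyncGroq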

pydantic_ai/providers/openai.py
@@ -1,7 +1,6 @@
 from __future__ import annotations as _annotations
 
 import os
-from typing import TypeVar
 
 import httpx
 
@@ -11,15 +10,13 @@ try:
     from openai import AsyncOpenAI
 except ImportError as _import_error:  # pragma: no cover
     raise ImportError(
-        'Please install `openai` to use the OpenAI provider, '
+        'Please install the `openai` package to use the OpenAI provider, '
         "you can use the `openai` optional group — `pip install 'pydantic-ai-slim[openai]'`"
     ) from _import_error
 
 
 from . import Provider
 
-InterfaceClient = TypeVar('InterfaceClient')
-
 
 class OpenAIProvider(Provider[AsyncOpenAI]):
     """Provider for OpenAI API."""
@@ -55,7 +52,7 @@ class OpenAIProvider(Provider[AsyncOpenAI]):
                 client to use. If provided, `base_url`, `api_key`, and `http_client` must be `None`.
             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
         """
-        self._base_url = base_url or 'https://api.openai.com/v1'
+        self._base_url = base_url or os.getenv('OPENAI_BASE_URL', 'https://api.openai.com/v1')
         # This is a workaround for the OpenAI client requiring an API key, whilst locally served,
         # openai compatible models do not always need an API key, but a placeholder (non-empty) key is required.
         if api_key is None and 'OPENAI_API_KEY' not in os.environ and openai_client is None:
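
Honoring `OPENAI_BASE_URL` matches the OpenAI SDK's own convention and lets OpenAI-compatible local servers be targeted without code changes. A sketch (the URL is illustrative):

    import os

    os.environ['OPENAI_BASE_URL'] = 'http://localhost:11434/v1'  # e.g. a local OpenAI-compatible server

    from pydantic_ai.providers.openai import OpenAIProvider

    print(OpenAIProvider(api_key='unused').base_url)  # http://localhost:11434/v1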

pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai-slim"
-version = "0.0.38"
+version = "0.0.40"
 description = "Agent Framework / shim to use Pydantic with LLMs, slim package"
 authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
 license = "MIT"
@@ -36,7 +36,7 @@ dependencies = [
     "griffe>=1.3.2",
     "httpx>=0.27",
     "pydantic>=2.10",
-    "pydantic-graph==0.0.38",
+    "pydantic-graph==0.0.40",
     "exceptiongroup; python_version < '3.11'",
     "opentelemetry-api>=1.28.0",
     "typing-inspection>=0.4.0",