pydantic-ai-slim 0.0.37__tar.gz → 0.0.39__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pydantic-ai-slim might be problematic.
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/PKG-INFO +2 -2
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/__init__.py +2 -1
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/messages.py +112 -11
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/anthropic.py +29 -2
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/bedrock.py +51 -7
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/fallback.py +3 -1
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/gemini.py +10 -16
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/groq.py +39 -9
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/instrumented.py +43 -26
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/mistral.py +3 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/openai.py +22 -1
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/providers/__init__.py +4 -0
- pydantic_ai_slim-0.0.39/pydantic_ai/providers/groq.py +75 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/usage.py +1 -1
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pyproject.toml +2 -2
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/.gitignore +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/README.md +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/_result.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/agent.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/__init__.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/vertexai.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/tools.py +0 -0
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.37
+Version: 0.0.39
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.0.37
+Requires-Dist: pydantic-graph==0.0.39
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/__init__.py

@@ -10,7 +10,7 @@ from .exceptions import (
     UsageLimitExceeded,
     UserError,
 )
-from .messages import AudioUrl, BinaryContent, ImageUrl
+from .messages import AudioUrl, BinaryContent, DocumentUrl, ImageUrl
 from .tools import RunContext, Tool
 
 __all__ = (
@@ -33,6 +33,7 @@ __all__ = (
     # messages
     'ImageUrl',
     'AudioUrl',
+    'DocumentUrl',
     'BinaryContent',
     # tools
     'Tool',
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/messages.py

@@ -4,6 +4,7 @@ import uuid
 from collections.abc import Sequence
 from dataclasses import dataclass, field, replace
 from datetime import datetime
+from mimetypes import guess_type
 from typing import Annotated, Any, Literal, Union, cast, overload
 
 import pydantic
@@ -83,9 +84,57 @@ class ImageUrl:
         else:
             raise ValueError(f'Unknown image file extension: {self.url}')
 
+    @property
+    def format(self) -> ImageFormat:
+        """The file format of the image.
+
+        The choice of supported formats were based on the Bedrock Converse API. Other APIs don't require to use a format.
+        """
+        return _image_format(self.media_type)
+
+
+@dataclass
+class DocumentUrl:
+    """The URL of the document."""
+
+    url: str
+    """The URL of the document."""
+
+    kind: Literal['document-url'] = 'document-url'
+    """Type identifier, this is available on all parts as a discriminator."""
+
+    @property
+    def media_type(self) -> str:
+        """Return the media type of the document, based on the url."""
+        type_, _ = guess_type(self.url)
+        if type_ is None:
+            raise RuntimeError(f'Unknown document file extension: {self.url}')
+        return type_
+
+    @property
+    def format(self) -> DocumentFormat:
+        """The file format of the document.
+
+        The choice of supported formats were based on the Bedrock Converse API. Other APIs don't require to use a format.
+        """
+        return _document_format(self.media_type)
+
 
 AudioMediaType: TypeAlias = Literal['audio/wav', 'audio/mpeg']
 ImageMediaType: TypeAlias = Literal['image/jpeg', 'image/png', 'image/gif', 'image/webp']
+DocumentMediaType: TypeAlias = Literal[
+    'application/pdf',
+    'text/plain',
+    'text/csv',
+    'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
+    'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
+    'text/html',
+    'text/markdown',
+    'application/vnd.ms-excel',
+]
+AudioFormat: TypeAlias = Literal['wav', 'mp3']
+ImageFormat: TypeAlias = Literal['jpeg', 'png', 'gif', 'webp']
+DocumentFormat: TypeAlias = Literal['csv', 'doc', 'docx', 'html', 'md', 'pdf', 'txt', 'xls', 'xlsx']
 
 
 @dataclass
@@ -95,7 +144,7 @@ class BinaryContent:
     data: bytes
     """The binary data."""
 
-    media_type: AudioMediaType | ImageMediaType | str
+    media_type: AudioMediaType | ImageMediaType | DocumentMediaType | str
     """The media type of the binary data."""
 
     kind: Literal['binary'] = 'binary'
@@ -112,17 +161,69 @@ class BinaryContent:
         return self.media_type.startswith('image/')
 
     @property
-    def …
-        """Return the …
-    [six more removed lines are truncated in the source diff]
+    def is_document(self) -> bool:
+        """Return `True` if the media type is a document type."""
+        return self.media_type in {
+            'application/pdf',
+            'text/plain',
+            'text/csv',
+            'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
+            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
+            'text/html',
+            'text/markdown',
+            'application/vnd.ms-excel',
+        }
 
-
-
+    @property
+    def format(self) -> str:
+        """The file format of the binary content."""
+        if self.is_audio:
+            if self.media_type == 'audio/mpeg':
+                return 'mp3'
+            elif self.media_type == 'audio/wav':
+                return 'wav'
+        elif self.is_image:
+            return _image_format(self.media_type)
+        elif self.is_document:
+            return _document_format(self.media_type)
+        raise ValueError(f'Unknown media type: {self.media_type}')
+
+
+UserContent: TypeAlias = 'str | ImageUrl | AudioUrl | DocumentUrl | BinaryContent'
+
+
+def _document_format(media_type: str) -> DocumentFormat:
+    if media_type == 'application/pdf':
+        return 'pdf'
+    elif media_type == 'text/plain':
+        return 'txt'
+    elif media_type == 'text/csv':
+        return 'csv'
+    elif media_type == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
+        return 'docx'
+    elif media_type == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet':
+        return 'xlsx'
+    elif media_type == 'text/html':
+        return 'html'
+    elif media_type == 'text/markdown':
+        return 'md'
+    elif media_type == 'application/vnd.ms-excel':
+        return 'xls'
+    else:
+        raise ValueError(f'Unknown document media type: {media_type}')
+
+
+def _image_format(media_type: str) -> ImageFormat:
+    if media_type == 'image/jpeg':
+        return 'jpeg'
+    elif media_type == 'image/png':
+        return 'png'
+    elif media_type == 'image/gif':
+        return 'gif'
+    elif media_type == 'image/webp':
+        return 'webp'
+    else:
+        raise ValueError(f'Unknown image media type: {media_type}')
 
 
 @dataclass
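Taken together, this adds documents as a third content family next to images and audio. A minimal sketch of how the new types behave; the URL and bytes below are illustrative, not taken from the release:

```python
from pydantic_ai.messages import BinaryContent, DocumentUrl

# The media type is guessed from the URL's extension via `mimetypes.guess_type`.
doc = DocumentUrl(url='https://example.com/report.pdf')  # hypothetical URL
print(doc.media_type)  # application/pdf
print(doc.format)      # pdf (the Bedrock Converse-style format string)

# BinaryContent now also accepts document media types.
pdf = BinaryContent(data=b'%PDF-1.7 ...', media_type='application/pdf')
print(pdf.is_document)  # True
print(pdf.format)       # pdf
```
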
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/anthropic.py

@@ -9,6 +9,7 @@ from datetime import datetime, timezone
 from json import JSONDecodeError, loads as json_loads
 from typing import Any, Literal, Union, cast, overload
 
+from anthropic.types import DocumentBlockParam
 from httpx import AsyncClient as AsyncHTTPClient
 from typing_extensions import assert_never
 
@@ -16,6 +17,7 @@ from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
 from .._utils import guard_tool_call_id as _guard_tool_call_id
 from ..messages import (
     BinaryContent,
+    DocumentUrl,
     ImageUrl,
     ModelMessage,
     ModelRequest,
@@ -42,11 +44,13 @@ from . import (
 try:
     from anthropic import NOT_GIVEN, APIStatusError, AsyncAnthropic, AsyncStream
     from anthropic.types import (
+        Base64PDFSourceParam,
         ContentBlock,
         ImageBlockParam,
         Message as AnthropicMessage,
         MessageParam,
         MetadataParam,
+        PlainTextSourceParam,
         RawContentBlockDeltaEvent,
         RawContentBlockStartEvent,
         RawContentBlockStopEvent,
@@ -288,7 +292,9 @@ class AnthropicModel(Model):
         anthropic_messages: list[MessageParam] = []
         for m in messages:
             if isinstance(m, ModelRequest):
-                user_content_params: list[ToolResultBlockParam | TextBlockParam | ImageBlockParam] = []
+                user_content_params: list[
+                    ToolResultBlockParam | TextBlockParam | ImageBlockParam | DocumentBlockParam
+                ] = []
                 for request_part in m.parts:
                     if isinstance(request_part, SystemPromptPart):
                         system_prompt += request_part.content
@@ -334,7 +340,9 @@ class AnthropicModel(Model):
         return system_prompt, anthropic_messages
 
     @staticmethod
-    async def _map_user_prompt(part: UserPromptPart) -> AsyncGenerator[ImageBlockParam | TextBlockParam]:
+    async def _map_user_prompt(
+        part: UserPromptPart,
+    ) -> AsyncGenerator[ImageBlockParam | TextBlockParam | DocumentBlockParam]:
         if isinstance(part.content, str):
             yield TextBlockParam(text=part.content, type='text')
         else:
@@ -379,6 +387,25 @@ class AnthropicModel(Model):
                         )
                     else:  # pragma: no cover
                         raise RuntimeError(f'Unsupported image type: {mime_type}')
+                elif isinstance(item, DocumentUrl):
+                    response = await cached_async_http_client().get(item.url)
+                    response.raise_for_status()
+                    if item.media_type == 'application/pdf':
+                        yield DocumentBlockParam(
+                            source=Base64PDFSourceParam(
+                                data=io.BytesIO(response.content),
+                                media_type=item.media_type,
+                                type='base64',
+                            ),
+                            type='document',
+                        )
+                    elif item.media_type == 'text/plain':
+                        yield DocumentBlockParam(
+                            source=PlainTextSourceParam(data=response.text, media_type=item.media_type, type='text'),
+                            type='document',
+                        )
+                    else:  # pragma: no cover
+                        raise RuntimeError(f'Unsupported media type: {item.media_type}')
                 else:
                     raise RuntimeError(f'Unsupported content type: {type(item)}')
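With the mapping above, a `DocumentUrl` in a user prompt is downloaded and forwarded to Claude as a document block (base64 for PDF, plain text for text/plain). A hedged usage sketch; the model name and URL are placeholders and assume an `ANTHROPIC_API_KEY` is configured:

```python
from pydantic_ai import Agent, DocumentUrl

# Any document-capable Claude model should work here.
agent = Agent('anthropic:claude-3-5-sonnet-latest')
result = agent.run_sync(
    ['Summarize this document.', DocumentUrl(url='https://example.com/report.pdf')]
)
print(result.data)
```
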
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/bedrock.py

@@ -14,6 +14,10 @@ from typing_extensions import ParamSpec, assert_never
 
 from pydantic_ai import _utils, result
 from pydantic_ai.messages import (
+    AudioUrl,
+    BinaryContent,
+    DocumentUrl,
+    ImageUrl,
     ModelMessage,
     ModelRequest,
     ModelResponse,
@@ -26,7 +30,7 @@ from pydantic_ai.messages import (
     ToolReturnPart,
     UserPromptPart,
 )
-from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
+from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse, cached_async_http_client
 from pydantic_ai.providers import Provider, infer_provider
 from pydantic_ai.settings import ModelSettings
 from pydantic_ai.tools import ToolDefinition
@@ -37,9 +41,11 @@ if TYPE_CHECKING:
     from mypy_boto3_bedrock_runtime import BedrockRuntimeClient
     from mypy_boto3_bedrock_runtime.type_defs import (
         ContentBlockOutputTypeDef,
+        ContentBlockUnionTypeDef,
         ConverseResponseTypeDef,
         ConverseStreamMetadataEventTypeDef,
         ConverseStreamOutputTypeDef,
+        ImageBlockTypeDef,
         InferenceConfigurationTypeDef,
         MessageUnionTypeDef,
         ToolChoiceTypeDef,
@@ -244,7 +250,7 @@ class BedrockConverseModel(Model):
         else:
             tool_choice = {'auto': {}}
 
-        system_prompt, bedrock_messages = self._map_message(messages)
+        system_prompt, bedrock_messages = await self._map_message(messages)
         inference_config = self._map_inference_config(model_settings)
 
         params = {
@@ -285,7 +291,7 @@ class BedrockConverseModel(Model):
 
         return inference_config
 
-    def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[MessageUnionTypeDef]]:
+    async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[MessageUnionTypeDef]]:
         """Just maps a `pydantic_ai.Message` to the Bedrock `MessageUnionTypeDef`."""
         system_prompt: str = ''
         bedrock_messages: list[MessageUnionTypeDef] = []
@@ -295,10 +301,7 @@ class BedrockConverseModel(Model):
             if isinstance(part, SystemPromptPart):
                 system_prompt += part.content
             elif isinstance(part, UserPromptPart):
-                if isinstance(part.content, str):
-                    bedrock_messages.append({'role': 'user', 'content': [{'text': part.content}]})
-                else:
-                    raise NotImplementedError('User prompt can only be a string for now.')
+                bedrock_messages.extend(await self._map_user_prompt(part))
             elif isinstance(part, ToolReturnPart):
                 assert part.tool_call_id is not None
                 bedrock_messages.append(
@@ -348,6 +351,47 @@ class BedrockConverseModel(Model):
             assert_never(m)
         return system_prompt, bedrock_messages
 
+    @staticmethod
+    async def _map_user_prompt(part: UserPromptPart) -> list[MessageUnionTypeDef]:
+        content: list[ContentBlockUnionTypeDef] = []
+        if isinstance(part.content, str):
+            content.append({'text': part.content})
+        else:
+            document_count = 0
+            for item in part.content:
+                if isinstance(item, str):
+                    content.append({'text': item})
+                elif isinstance(item, BinaryContent):
+                    format = item.format
+                    if item.is_document:
+                        document_count += 1
+                        name = f'Document {document_count}'
+                        assert format in ('pdf', 'txt', 'csv', 'doc', 'docx', 'xls', 'xlsx', 'html', 'md')
+                        content.append({'document': {'name': name, 'format': format, 'source': {'bytes': item.data}}})
+                    elif item.is_image:
+                        assert format in ('jpeg', 'png', 'gif', 'webp')
+                        content.append({'image': {'format': format, 'source': {'bytes': item.data}}})
+                    else:
+                        raise NotImplementedError('Binary content is not supported yet.')
+                elif isinstance(item, (ImageUrl, DocumentUrl)):
+                    response = await cached_async_http_client().get(item.url)
+                    response.raise_for_status()
+                    if item.kind == 'image-url':
+                        format = item.media_type.split('/')[1]
+                        assert format in ('jpeg', 'png', 'gif', 'webp'), f'Unsupported image format: {format}'
+                        image: ImageBlockTypeDef = {'format': format, 'source': {'bytes': response.content}}
+                        content.append({'image': image})
+                    elif item.kind == 'document-url':
+                        document_count += 1
+                        name = f'Document {document_count}'
+                        data = response.content
+                        content.append({'document': {'name': name, 'format': item.format, 'source': {'bytes': data}}})
+                elif isinstance(item, AudioUrl):  # pragma: no cover
+                    raise NotImplementedError('Audio is not supported yet.')
+                else:
+                    assert_never(item)
+        return [{'role': 'user', 'content': content}]
+
     @staticmethod
     def _map_tool_call(t: ToolCallPart) -> ContentBlockOutputTypeDef:
         assert t.tool_call_id is not None
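The new `_map_user_prompt` means Bedrock user prompts are no longer restricted to plain strings: text, images, and documents (inline bytes or URLs) are all mapped to Converse content blocks. A sketch under the assumption that valid AWS credentials are configured; the model id and URL are placeholders:

```python
from pydantic_ai import Agent, DocumentUrl
from pydantic_ai.models.bedrock import BedrockConverseModel

model = BedrockConverseModel('us.amazon.nova-pro-v1:0')  # illustrative model id
agent = Agent(model)
result = agent.run_sync(
    ['What are the key figures in this report?', DocumentUrl(url='https://example.com/report.pdf')]
)
```
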
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/fallback.py

@@ -61,7 +61,9 @@ class FallbackModel(Model):
 
         for model in self.models:
             try:
-                return await model.request(messages, model_settings, model_request_parameters)
+                response, usage = await model.request(messages, model_settings, model_request_parameters)
+                response.model_used = model  # type: ignore
+                return response, usage
             except Exception as exc:
                 if self._fallback_on(exc):
                     exceptions.append(exc)
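The `model_used` attribute set here is what lets `InstrumentedModel` (later in this diff) report the model that actually answered rather than only the first model in the chain. A brief sketch; the model names are placeholders and assume the relevant API keys are set:

```python
from pydantic_ai import Agent
from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.models.fallback import FallbackModel
from pydantic_ai.models.openai import OpenAIModel

# If the OpenAI call raises, the Anthropic model is tried next, and the
# response now records which model actually produced it.
model = FallbackModel(OpenAIModel('gpt-4o'), AnthropicModel('claude-3-5-sonnet-latest'))
agent = Agent(model)
```
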
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/gemini.py

@@ -21,6 +21,7 @@ from .. import ModelHTTPError, UnexpectedModelBehavior, UserError, _utils, usage
 from ..messages import (
     AudioUrl,
     BinaryContent,
+    DocumentUrl,
     ImageUrl,
     ModelMessage,
     ModelRequest,
@@ -362,22 +363,15 @@ class GeminiModel(Model):
                 content.append(
                     _GeminiInlineDataPart(inline_data={'data': base64_encoded, 'mime_type': item.media_type})
                 )
-            elif isinstance(item, (AudioUrl, ImageUrl)):
-                [eight removed lines are truncated in the source diff]
-                response.raise_for_status()
-                base64_encoded = base64.b64encode(response.content).decode('utf-8')
-                content.append(
-                    _GeminiInlineDataPart(
-                        inline_data={'data': base64_encoded, 'mime_type': response.headers['Content-Type']}
-                    )
-                )
+            elif isinstance(item, (AudioUrl, ImageUrl, DocumentUrl)):
+                client = cached_async_http_client()
+                response = await client.get(item.url, follow_redirects=True)
+                response.raise_for_status()
+                mime_type = response.headers['Content-Type'].split(';')[0]
+                inline_data = _GeminiInlineDataPart(
+                    inline_data={'data': base64.b64encode(response.content).decode('utf-8'), 'mime_type': mime_type}
+                )
+                content.append(inline_data)
             else:
                 assert_never(item)
         return content
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/groq.py

@@ -9,12 +9,13 @@ from itertools import chain
 from typing import Literal, Union, cast, overload
 
 from httpx import AsyncClient as AsyncHTTPClient
-from typing_extensions import assert_never
+from typing_extensions import assert_never, deprecated
 
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
 from .._utils import guard_tool_call_id as _guard_tool_call_id
 from ..messages import (
     BinaryContent,
+    DocumentUrl,
     ImageUrl,
     ModelMessage,
     ModelRequest,
@@ -28,15 +29,10 @@ from ..messages import (
     ToolReturnPart,
     UserPromptPart,
 )
+from ..providers import Provider, infer_provider
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
-from . import (
-    Model,
-    ModelRequestParameters,
-    StreamedResponse,
-    cached_async_http_client,
-    check_allow_model_requests,
-)
+from . import Model, ModelRequestParameters, StreamedResponse, cached_async_http_client, check_allow_model_requests
 
 try:
     from groq import NOT_GIVEN, APIStatusError, AsyncGroq, AsyncStream
@@ -48,6 +44,7 @@ except ImportError as _import_error:
         "you can use the `groq` optional group — `pip install 'pydantic-ai-slim[groq]'`"
     ) from _import_error
 
+
 LatestGroqModelNames = Literal[
     'llama-3.3-70b-versatile',
     'llama-3.3-70b-specdec',
@@ -93,10 +90,31 @@ class GroqModel(Model):
     _model_name: GroqModelName = field(repr=False)
     _system: str | None = field(default='groq', repr=False)
 
+    @overload
+    def __init__(
+        self,
+        model_name: GroqModelName,
+        *,
+        provider: Literal['groq'] | Provider[AsyncGroq] = 'groq',
+    ) -> None: ...
+
+    @deprecated('Use the `provider` parameter instead of `api_key`, `groq_client`, and `http_client`.')
+    @overload
     def __init__(
         self,
         model_name: GroqModelName,
         *,
+        provider: None = None,
+        api_key: str | None = None,
+        groq_client: AsyncGroq | None = None,
+        http_client: AsyncHTTPClient | None = None,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        model_name: GroqModelName,
+        *,
+        provider: Literal['groq'] | Provider[AsyncGroq] | None = None,
         api_key: str | None = None,
         groq_client: AsyncGroq | None = None,
         http_client: AsyncHTTPClient | None = None,
@@ -106,6 +124,9 @@ class GroqModel(Model):
         Args:
             model_name: The name of the Groq model to use. List of model names available
                 [here](https://console.groq.com/docs/models).
+            provider: The provider to use for authentication and API access. Can be either the string
+                'groq' or an instance of `Provider[AsyncGroq]`. If not provided, a new provider will be
+                created using the other parameters.
             api_key: The API key to use for authentication, if not provided, the `GROQ_API_KEY` environment variable
                 will be used if available.
             groq_client: An existing
@@ -114,7 +135,13 @@ class GroqModel(Model):
             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
         """
         self._model_name = model_name
-        if groq_client is not None:
+
+        if provider is not None:
+            if isinstance(provider, str):
+                self.client = infer_provider(provider).client
+            else:
+                self.client = provider.client
+        elif groq_client is not None:
             assert http_client is None, 'Cannot provide both `groq_client` and `http_client`'
             assert api_key is None, 'Cannot provide both `groq_client` and `api_key`'
             self.client = groq_client
@@ -342,8 +369,11 @@ class GroqModel(Model):
                     content.append(chat.ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
                 else:
                     raise RuntimeError('Only images are supported for binary content in Groq.')
+            elif isinstance(item, DocumentUrl):  # pragma: no cover
+                raise RuntimeError('DocumentUrl is not supported in Groq.')
             else:  # pragma: no cover
                 raise RuntimeError(f'Unsupported content type: {type(item)}')
+
         return chat.ChatCompletionUserMessageParam(role='user', content=content)
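Construction now prefers the provider abstraction, with the old keyword arguments kept behind a `@deprecated` overload. Roughly (the API key is a placeholder):

```python
from pydantic_ai.models.groq import GroqModel
from pydantic_ai.providers.groq import GroqProvider

# New style: resolve the provider by name (reads GROQ_API_KEY from the environment) ...
model = GroqModel('llama-3.3-70b-versatile', provider='groq')

# ... or pass a configured provider explicitly.
model = GroqModel('llama-3.3-70b-versatile', provider=GroqProvider(api_key='gsk_...'))

# Deprecated but still accepted:
# GroqModel('llama-3.3-70b-versatile', api_key='gsk_...')
```
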
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/instrumented.py

@@ -88,6 +88,10 @@ class InstrumentationSettings:
         self.event_mode = event_mode
 
 
+GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system'
+GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model'
+
+
 @dataclass
 class InstrumentedModel(WrapperModel):
     """Model which is instrumented with OpenTelemetry."""
@@ -138,27 +142,14 @@ class InstrumentedModel(WrapperModel):
         model_settings: ModelSettings | None,
     ) -> Iterator[Callable[[ModelResponse, Usage], None]]:
         operation = 'chat'
-        [removed line truncated in the source diff]
-        span_name = f'{operation} {model_name}'
-        system = getattr(self.wrapped, 'system', '') or self.wrapped.__class__.__name__.removesuffix('Model').lower()
-        system = {'google-gla': 'gemini', 'google-vertex': 'vertex_ai', 'mistral': 'mistral_ai'}.get(system, system)
+        span_name = f'{operation} {self.model_name}'
         # TODO Missing attributes:
         # - error.type: unclear if we should do something here or just always rely on span exceptions
         # - gen_ai.request.stop_sequences/top_k: model_settings doesn't include these
         attributes: dict[str, AttributeValue] = {
             'gen_ai.operation.name': operation,
-            'gen_ai.system': system,
-            'gen_ai.request.model': model_name,
+            **self.model_attributes(self.wrapped),
         }
-        if base_url := self.wrapped.base_url:
-            try:
-                parsed = urlparse(base_url)
-                if parsed.hostname:
-                    attributes['server.address'] = parsed.hostname
-                if parsed.port:
-                    attributes['server.port'] = parsed.port
-            except Exception:  # pragma: no cover
-                pass
 
         if model_settings:
             for key in MODEL_SETTING_ATTRIBUTES:
@@ -183,21 +174,26 @@ class InstrumentedModel(WrapperModel):
                     },
                 )
             )
-            [six removed lines are truncated in the source diff]
+            new_attributes: dict[str, AttributeValue] = usage.opentelemetry_attributes()  # type: ignore
+            if model_used := getattr(response, 'model_used', None):
+                # FallbackModel sets model_used on the response so that we can report the attributes
+                # of the model that was actually used.
+                new_attributes.update(self.model_attributes(model_used))
+            attributes.update(new_attributes)
+            request_model = attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]
+            new_attributes['gen_ai.response.model'] = response.model_name or request_model
+            span.set_attributes(new_attributes)
+            span.update_name(f'{operation} {request_model}')
+            for event in events:
+                event.attributes = {
+                    GEN_AI_SYSTEM_ATTRIBUTE: attributes[GEN_AI_SYSTEM_ATTRIBUTE],
+                    **(event.attributes or {}),
                 }
-            )
-            self._emit_events(system, span, events)
+            self._emit_events(span, events)
 
         yield finish
 
-    def _emit_events(self, system: str, span: Span, events: list[Event]) -> None:
-        for event in events:
-            event.attributes = {'gen_ai.system': system, **(event.attributes or {})}
+    def _emit_events(self, span: Span, events: list[Event]) -> None:
         if self.options.event_mode == 'logs':
             for event in events:
                 self.options.event_logger.emit(event)
@@ -215,6 +211,27 @@ class InstrumentedModel(WrapperModel):
             }
         )
 
+    @staticmethod
+    def model_attributes(model: Model):
+        system = getattr(model, 'system', '') or model.__class__.__name__.removesuffix('Model').lower()
+        system = {'google-gla': 'gemini', 'google-vertex': 'vertex_ai', 'mistral': 'mistral_ai'}.get(system, system)
+        attributes: dict[str, AttributeValue] = {
+            GEN_AI_SYSTEM_ATTRIBUTE: system,
+            GEN_AI_REQUEST_MODEL_ATTRIBUTE: model.model_name,
+        }
+        if base_url := model.base_url:
+            try:
+                parsed = urlparse(base_url)
+            except Exception:  # pragma: no cover
+                pass
+            else:
+                if parsed.hostname:
+                    attributes['server.address'] = parsed.hostname
+                if parsed.port:
+                    attributes['server.port'] = parsed.port
+
+        return attributes
+
     @staticmethod
     def event_to_dict(event: Event) -> dict[str, Any]:
         if not event.body:
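Extracting `model_attributes` into a static method is what allows a span to be re-attributed when `FallbackModel` swaps models mid-request. A rough sketch of the attributes it computes (assumes `GROQ_API_KEY` is set; exact values depend on the model and base URL):

```python
from pydantic_ai.models.groq import GroqModel
from pydantic_ai.models.instrumented import InstrumentedModel

attrs = InstrumentedModel.model_attributes(GroqModel('llama-3.3-70b-versatile'))
# e.g. {'gen_ai.system': 'groq',
#       'gen_ai.request.model': 'llama-3.3-70b-versatile',
#       'server.address': 'api.groq.com'}
```
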
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/mistral.py

@@ -17,6 +17,7 @@ from .. import ModelHTTPError, UnexpectedModelBehavior, _utils
 from .._utils import now_utc as _now_utc
 from ..messages import (
     BinaryContent,
+    DocumentUrl,
     ImageUrl,
     ModelMessage,
     ModelRequest,
@@ -495,6 +496,8 @@ class MistralModel(Model):
                     content.append(MistralImageURLChunk(image_url=image_url, type='image_url'))
                 else:
                     raise RuntimeError('Only image binary content is supported for Mistral.')
+            elif isinstance(item, DocumentUrl):
+                raise RuntimeError('DocumentUrl is not supported in Mistral.')
             else:  # pragma: no cover
                 raise RuntimeError(f'Unsupported content type: {type(item)}')
         return MistralUserMessage(content=content)
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/models/openai.py

@@ -18,6 +18,7 @@ from .._utils import guard_tool_call_id as _guard_tool_call_id
 from ..messages import (
     AudioUrl,
     BinaryContent,
+    DocumentUrl,
     ImageUrl,
     ModelMessage,
     ModelRequest,
@@ -418,7 +419,8 @@ class OpenAIModel(Model):
                 image_url = ImageURL(url=f'data:{item.media_type};base64,{base64_encoded}')
                 content.append(ChatCompletionContentPartImageParam(image_url=image_url, type='image_url'))
             elif item.is_audio:
-                [removed line truncated in the source diff]
+                assert item.format in ('wav', 'mp3')
+                audio = InputAudio(data=base64_encoded, format=item.format)
                 content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
             else:  # pragma: no cover
                 raise RuntimeError(f'Unsupported binary content type: {item.media_type}')
@@ -429,6 +431,25 @@ class OpenAIModel(Model):
             base64_encoded = base64.b64encode(response.content).decode('utf-8')
             audio = InputAudio(data=base64_encoded, format=response.headers.get('content-type'))
             content.append(ChatCompletionContentPartInputAudioParam(input_audio=audio, type='input_audio'))
+        elif isinstance(item, DocumentUrl):  # pragma: no cover
+            raise NotImplementedError('DocumentUrl is not supported for OpenAI')
+            # The following implementation should have worked, but it seems we have the following error:
+            # pydantic_ai.exceptions.ModelHTTPError: status_code: 400, model_name: gpt-4o, body:
+            # {
+            #     'message': "Unknown parameter: 'messages[1].content[1].file.data'.",
+            #     'type': 'invalid_request_error',
+            #     'param': 'messages[1].content[1].file.data',
+            #     'code': 'unknown_parameter'
+            # }
+            #
+            # client = cached_async_http_client()
+            # response = await client.get(item.url)
+            # response.raise_for_status()
+            # base64_encoded = base64.b64encode(response.content).decode('utf-8')
+            # media_type = response.headers.get('content-type').split(';')[0]
+            # file_data = f'data:{media_type};base64,{base64_encoded}'
+            # file = File(file={'file_data': file_data, 'file_name': item.url, 'file_id': item.url}, type='file')
+            # content.append(file)
         else:
             assert_never(item)
         return chat.ChatCompletionUserMessageParam(role='user', content=content)
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pydantic_ai/providers/__init__.py

@@ -65,5 +65,9 @@ def infer_provider(provider: str) -> Provider[Any]:
         from .bedrock import BedrockProvider
 
         return BedrockProvider()
+    elif provider == 'groq':
+        from .groq import GroqProvider
+
+        return GroqProvider()
     else:  # pragma: no cover
         raise ValueError(f'Unknown provider: {provider}')
pydantic_ai_slim-0.0.39/pydantic_ai/providers/groq.py (new file)

@@ -0,0 +1,75 @@
+from __future__ import annotations as _annotations
+
+import os
+from typing import overload
+
+from httpx import AsyncClient as AsyncHTTPClient
+
+from pydantic_ai.models import cached_async_http_client
+
+try:
+    from groq import AsyncGroq
+except ImportError as _import_error:  # pragma: no cover
+    raise ImportError(
+        'Please install `groq` to use the Groq provider, '
+        "you can use the `groq` optional group — `pip install 'pydantic-ai-slim[groq]'`"
+    ) from _import_error
+
+
+from . import Provider
+
+
+class GroqProvider(Provider[AsyncGroq]):
+    """Provider for Groq API."""
+
+    @property
+    def name(self) -> str:
+        return 'groq'
+
+    @property
+    def base_url(self) -> str:
+        return os.environ.get('GROQ_BASE_URL', 'https://api.groq.com')
+
+    @property
+    def client(self) -> AsyncGroq:
+        return self._client
+
+    @overload
+    def __init__(self, *, groq_client: AsyncGroq | None = None) -> None: ...
+
+    @overload
+    def __init__(self, *, api_key: str | None = None, http_client: AsyncHTTPClient | None = None) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        groq_client: AsyncGroq | None = None,
+        http_client: AsyncHTTPClient | None = None,
+    ) -> None:
+        """Create a new Groq provider.
+
+        Args:
+            api_key: The API key to use for authentication, if not provided, the `GROQ_API_KEY` environment variable
+                will be used if available.
+            groq_client: An existing
+                [`AsyncGroq`](https://github.com/groq/groq-python?tab=readme-ov-file#async-usage)
+                client to use. If provided, `api_key` and `http_client` must be `None`.
+            http_client: An existing `AsyncHTTPClient` to use for making HTTP requests.
+        """
+        api_key = api_key or os.environ.get('GROQ_API_KEY')
+
+        if api_key is None and groq_client is None:
+            raise ValueError(
+                'Set the `GROQ_API_KEY` environment variable or pass it via `GroqProvider(api_key=...)`'
+                'to use the Groq provider.'
+            )
+
+        if groq_client is not None:
+            assert http_client is None, 'Cannot provide both `groq_client` and `http_client`'
+            assert api_key is None, 'Cannot provide both `groq_client` and `api_key`'
+            self._client = groq_client
+        elif http_client is not None:
+            self._client = AsyncGroq(base_url=self.base_url, api_key=api_key, http_client=http_client)
+        else:
+            self._client = AsyncGroq(base_url=self.base_url, api_key=api_key, http_client=cached_async_http_client())
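The provider accepts an API key, a pre-built `AsyncGroq` client, or a custom `httpx` client. A couple of hedged construction examples; the keys and timeout are placeholders:

```python
import httpx
from groq import AsyncGroq
from pydantic_ai.providers.groq import GroqProvider

# Reuse a pre-configured httpx client ...
provider = GroqProvider(api_key='gsk_...', http_client=httpx.AsyncClient(timeout=30))

# ... or wrap an existing AsyncGroq client directly.
provider = GroqProvider(groq_client=AsyncGroq(api_key='gsk_...'))
```
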
{pydantic_ai_slim-0.0.37 → pydantic_ai_slim-0.0.39}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai-slim"
-version = "0.0.37"
+version = "0.0.39"
 description = "Agent Framework / shim to use Pydantic with LLMs, slim package"
 authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
 license = "MIT"
@@ -36,7 +36,7 @@ dependencies = [
     "griffe>=1.3.2",
     "httpx>=0.27",
     "pydantic>=2.10",
-    "pydantic-graph==0.0.37",
+    "pydantic-graph==0.0.39",
     "exceptiongroup; python_version < '3.11'",
     "opentelemetry-api>=1.28.0",
     "typing-inspection>=0.4.0",