pydantic-ai-slim 0.0.42__tar.gz → 0.0.44__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/PKG-INFO +3 -3
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_cli.py +2 -2
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_griffe.py +29 -2
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/agent.py +4 -3
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/common_tools/duckduckgo.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/common_tools/tavily.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/mcp.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/messages.py +3 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/__init__.py +15 -14
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/anthropic.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/cohere.py +39 -5
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/groq.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/instrumented.py +27 -10
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/mistral.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/openai.py +3 -9
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/vertexai.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/wrapper.py +5 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/__init__.py +4 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/anthropic.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/azure.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/bedrock.py +1 -1
- pydantic_ai_slim-0.0.44/pydantic_ai/providers/cohere.py +72 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/deepseek.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/google_vertex.py +11 -19
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/groq.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/mistral.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/openai.py +1 -1
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pyproject.toml +3 -3
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/.gitignore +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/README.md +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_result.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/gemini.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/usage.py +0 -0

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.42
+Version: 0.0.44
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.0.42
+Requires-Dist: pydantic-graph==0.0.44
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
@@ -53,7 +53,7 @@ Requires-Dist: mcp>=1.4.1; (python_version >= '3.10') and extra == 'mcp'
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
 Provides-Extra: openai
-Requires-Dist: openai>=1.
+Requires-Dist: openai>=1.67.0; extra == 'openai'
 Provides-Extra: tavily
 Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
 Provides-Extra: vertexai

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_cli.py

@@ -31,13 +31,13 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install `rich`, `prompt-toolkit` and `argcomplete` to use the PydanticAI CLI, '
-
+        'you can use the `cli` optional group — `pip install "pydantic-ai-slim[cli]"`'
     ) from _import_error
 
 from pydantic_ai.agent import Agent
 from pydantic_ai.messages import ModelMessage, PartDeltaEvent, TextPartDelta
 
-__version__ = version('pydantic-ai')
+__version__ = version('pydantic-ai-slim')
 
 
 class SimpleCodeBlock(CodeBlock):

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/_griffe.py

@@ -22,8 +22,16 @@ def doc_descriptions(
 ) -> tuple[str, dict[str, str]]:
     """Extract the function description and parameter descriptions from a function's docstring.
 
+    The function parses the docstring using the specified format (or infers it if 'auto')
+    and extracts both the main description and parameter descriptions. If a returns section
+    is present in the docstring, the main description will be formatted as XML.
+
     Returns:
-        A tuple
+        A tuple containing:
+        - str: Main description string, which may be either:
+            * Plain text if no returns section is present
+            * XML-formatted if returns section exists, including <summary> and <returns> tags
+        - dict[str, str]: Dictionary mapping parameter names to their descriptions
     """
     doc = func.__doc__
     if doc is None:
@@ -33,7 +41,14 @@ def doc_descriptions(
     parent = cast(GriffeObject, sig)
 
     docstring_style = _infer_docstring_style(doc) if docstring_format == 'auto' else docstring_format
-    docstring = Docstring(
+    docstring = Docstring(
+        doc,
+        lineno=1,
+        parser=docstring_style,
+        parent=parent,
+        # https://mkdocstrings.github.io/griffe/reference/docstrings/#google-options
+        parser_options={'returns_named_value': False, 'returns_multiple_items': False},
+    )
     with _disable_griffe_logging():
         sections = docstring.parse()
 
@@ -45,6 +60,18 @@ def doc_descriptions(
     if main := next((p for p in sections if p.kind == DocstringSectionKind.text), None):
         main_desc = main.value
 
+    if return_ := next((p for p in sections if p.kind == DocstringSectionKind.returns), None):
+        return_statement = return_.value[0]
+        return_desc = return_statement.description
+        return_type = return_statement.annotation
+        type_tag = f'<type>{return_type}</type>\n' if return_type else ''
+        return_xml = f'<returns>\n{type_tag}<description>{return_desc}</description>\n</returns>'
+
+        if main_desc:
+            main_desc = f'<summary>{main_desc}</summary>\n{return_xml}'
+        else:
+            main_desc = return_xml
+
     return main_desc, params
 
 

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/agent.py

@@ -13,7 +13,7 @@ from pydantic.json_schema import GenerateJsonSchema
 from typing_extensions import TypeGuard, TypeVar, deprecated
 
 from pydantic_graph import End, Graph, GraphRun, GraphRunContext
-from pydantic_graph._utils import
+from pydantic_graph._utils import run_until_complete
 
 from . import (
     _agent_graph,
@@ -195,6 +195,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
             If this isn't set, then the last value set by
             [`Agent.instrument_all()`][pydantic_ai.Agent.instrument_all]
             will be used, which defaults to False.
+            See the [Debugging and Monitoring guide](https://ai.pydantic.dev/logfire/) for more info.
         """
         if model is None or defer_model_check:
             self.model = model
@@ -445,7 +446,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         usage_limits = usage_limits or _usage.UsageLimits()
 
         if isinstance(model_used, InstrumentedModel):
-            tracer = model_used.
+            tracer = model_used.settings.tracer
         else:
             tracer = NoOpTracer()
         agent_name = self.name or 'agent'
@@ -566,7 +567,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         """
         if infer_name and self.name is None:
             self._infer_name(inspect.currentframe())
-        return
+        return run_until_complete(
             self.run(
                 user_prompt,
                 result_type=result_type,
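
The `run_sync` hunk above reroutes the blocking entry point through `run_until_complete` from `pydantic_graph`. A minimal usage sketch; the model name and prompt are illustrative, not taken from this diff:

    from pydantic_ai import Agent

    agent = Agent('openai:gpt-4o')  # illustrative model name
    # run_sync now drives Agent.run to completion via pydantic_graph's run_until_complete
    result = agent.run_sync('What is the capital of France?')
    print(result.data)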

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/common_tools/duckduckgo.py

@@ -13,7 +13,7 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install `duckduckgo-search` to use the DuckDuckGo search tool, '
-
+        'you can use the `duckduckgo` optional group — `pip install "pydantic-ai-slim[duckduckgo]"`'
     ) from _import_error
 
 __all__ = ('duckduckgo_search_tool',)

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/common_tools/tavily.py

@@ -11,7 +11,7 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install `tavily-python` to use the Tavily search tool, '
-
+        'you can use the `tavily` optional group — `pip install "pydantic-ai-slim[tavily]"`'
     ) from _import_error
 
 __all__ = ('tavily_search_tool',)

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/mcp.py

@@ -21,7 +21,7 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install the `mcp` package to use the MCP server, '
-
+        'you can use the `mcp` optional group — `pip install "pydantic-ai-slim[mcp]"`'
     ) from _import_error
 
 __all__ = 'MCPServer', 'MCPServerStdio', 'MCPServerHTTP'

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/messages.py

@@ -26,6 +26,9 @@ class SystemPromptPart:
     content: str
     """The content of the prompt."""
 
+    timestamp: datetime = field(default_factory=_now_utc)
+    """The timestamp of the prompt."""
+
     dynamic_ref: str | None = None
     """The ref of the dynamic system prompt function that generated this part.
 
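
The new field gives every `SystemPromptPart` a creation time. A small sketch of the effect, assuming the dataclass exactly as shown in the hunk:

    from pydantic_ai.messages import SystemPromptPart

    part = SystemPromptPart(content='You are a helpful assistant.')
    print(part.timestamp)  # filled in automatically by the _now_utc default factory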

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/__init__.py

@@ -12,7 +12,7 @@ from contextlib import asynccontextmanager, contextmanager
 from dataclasses import dataclass, field
 from datetime import datetime
 from functools import cache
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, cast
 
 import httpx
 from typing_extensions import Literal
@@ -133,8 +133,6 @@ KnownModelName = Literal[
     'gpt-4-turbo-2024-04-09',
     'gpt-4-turbo-preview',
     'gpt-4-vision-preview',
-    'gpt-4.5-preview',
-    'gpt-4.5-preview-2025-02-27',
     'gpt-4o',
     'gpt-4o-2024-05-13',
     'gpt-4o-2024-08-06',
@@ -146,6 +144,10 @@ KnownModelName = Literal[
     'gpt-4o-mini-2024-07-18',
     'gpt-4o-mini-audio-preview',
     'gpt-4o-mini-audio-preview-2024-12-17',
+    'gpt-4o-mini-search-preview',
+    'gpt-4o-mini-search-preview-2025-03-11',
+    'gpt-4o-search-preview',
+    'gpt-4o-search-preview-2025-03-11',
     'groq:gemma2-9b-it',
     'groq:llama-3.1-8b-instant',
     'groq:llama-3.2-11b-vision-preview',
@@ -189,8 +191,6 @@ KnownModelName = Literal[
     'openai:gpt-4-turbo-2024-04-09',
     'openai:gpt-4-turbo-preview',
     'openai:gpt-4-vision-preview',
-    'openai:gpt-4.5-preview',
-    'openai:gpt-4.5-preview-2025-02-27',
     'openai:gpt-4o',
     'openai:gpt-4o-2024-05-13',
     'openai:gpt-4o-2024-08-06',
@@ -202,6 +202,10 @@ KnownModelName = Literal[
     'openai:gpt-4o-mini-2024-07-18',
     'openai:gpt-4o-mini-audio-preview',
     'openai:gpt-4o-mini-audio-preview-2024-12-17',
+    'openai:gpt-4o-mini-search-preview',
+    'openai:gpt-4o-mini-search-preview-2025-03-11',
+    'openai:gpt-4o-search-preview',
+    'openai:gpt-4o-search-preview-2025-03-11',
     'openai:o1',
     'openai:o1-2024-12-17',
     'openai:o1-mini',
@@ -379,6 +383,7 @@ def infer_model(model: Model | KnownModelName) -> Model:
 
     try:
         provider, model_name = model.split(':', maxsplit=1)
+        provider = cast(str, provider)
     except ValueError:
         model_name = model
         # TODO(Marcelo): We should deprecate this way.
@@ -397,8 +402,7 @@ def infer_model(model: Model | KnownModelName) -> Model:
     if provider == 'cohere':
         from .cohere import CohereModel
 
-
-        return CohereModel(model_name)
+        return CohereModel(model_name, provider=provider)
     elif provider in ('deepseek', 'openai'):
         from .openai import OpenAIModel
 
@@ -410,22 +414,19 @@ def infer_model(model: Model | KnownModelName) -> Model:
     elif provider == 'groq':
         from .groq import GroqModel
 
-
-        return GroqModel(model_name)
+        return GroqModel(model_name, provider=provider)
     elif provider == 'mistral':
         from .mistral import MistralModel
 
-
-        return MistralModel(model_name)
+        return MistralModel(model_name, provider=provider)
     elif provider == 'anthropic':
         from .anthropic import AnthropicModel
 
-
-        return AnthropicModel(model_name)
+        return AnthropicModel(model_name, provider=provider)
    elif provider == 'bedrock':
         from .bedrock import BedrockConverseModel
 
-        return BedrockConverseModel(model_name)
+        return BedrockConverseModel(model_name, provider=provider)
     else:
         raise UserError(f'Unknown model: {model}')
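
These hunks thread the parsed `provider` string through to each model constructor, so a `'provider:model'` name now yields e.g. `GroqModel(model_name, provider='groq')`. A sketch using names visible in this diff; `command-r-plus` is an illustrative Cohere model name:

    from pydantic_ai.models import infer_model

    model = infer_model('openai:gpt-4o-search-preview')  # one of the names added above
    cohere_model = infer_model('cohere:command-r-plus')  # now CohereModel(..., provider='cohere')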

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/anthropic.py

@@ -65,7 +65,7 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install `anthropic` to use the Anthropic model, '
-
+        'you can use the `anthropic` optional group — `pip install "pydantic-ai-slim[anthropic]"`'
     ) from _import_error
 
 LatestAnthropicModelNames = Literal[

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/cohere.py

@@ -3,11 +3,11 @@ from __future__ import annotations as _annotations
 from collections.abc import Iterable
 from dataclasses import dataclass, field
 from itertools import chain
-from typing import Literal, Union, cast
+from typing import Literal, Union, cast, overload
 
 from cohere import TextAssistantMessageContentItem
 from httpx import AsyncClient as AsyncHTTPClient
-from typing_extensions import assert_never
+from typing_extensions import assert_never, deprecated
 
 from .. import ModelHTTPError, result
 from .._utils import guard_tool_call_id as _guard_tool_call_id
@@ -23,11 +23,13 @@ from ..messages import (
     ToolReturnPart,
     UserPromptPart,
 )
+from ..providers import Provider, infer_provider
 from ..settings import ModelSettings
 from ..tools import ToolDefinition
 from . import (
     Model,
     ModelRequestParameters,
+    cached_async_http_client,
     check_allow_model_requests,
 )
@@ -50,7 +52,7 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install `cohere` to use the Cohere model, '
-
+        'you can use the `cohere` optional group — `pip install "pydantic-ai-slim[cohere]"`'
     ) from _import_error
 
 LatestCohereModelNames = Literal[
@@ -100,10 +102,34 @@ class CohereModel(Model):
     _model_name: CohereModelName = field(repr=False)
     _system: str = field(default='cohere', repr=False)
 
+    @overload
     def __init__(
         self,
         model_name: CohereModelName,
         *,
+        provider: Literal['cohere'] | Provider[AsyncClientV2] = 'cohere',
+        api_key: None = None,
+        cohere_client: None = None,
+        http_client: None = None,
+    ) -> None: ...
+
+    @deprecated('Use the `provider` parameter instead of `api_key`, `cohere_client`, and `http_client`.')
+    @overload
+    def __init__(
+        self,
+        model_name: CohereModelName,
+        *,
+        provider: None = None,
+        api_key: str | None = None,
+        cohere_client: AsyncClientV2 | None = None,
+        http_client: AsyncHTTPClient | None = None,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        model_name: CohereModelName,
+        *,
+        provider: Literal['cohere'] | Provider[AsyncClientV2] | None = None,
         api_key: str | None = None,
         cohere_client: AsyncClientV2 | None = None,
         http_client: AsyncHTTPClient | None = None,
@@ -113,6 +139,9 @@ class CohereModel(Model):
         Args:
             model_name: The name of the Cohere model to use. List of model names
                 available [here](https://docs.cohere.com/docs/models#command).
+            provider: The provider to use for authentication and API access. Can be either the string
+                'cohere' or an instance of `Provider[AsyncClientV2]`. If not provided, a new provider will be
+                created using the other parameters.
             api_key: The API key to use for authentication, if not provided, the
                 `CO_API_KEY` environment variable will be used if available.
             cohere_client: An existing Cohere async client to use. If provided,
@@ -120,12 +149,17 @@ class CohereModel(Model):
             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
         """
         self._model_name: CohereModelName = model_name
-        if cohere_client is not None:
+
+        if provider is not None:
+            if isinstance(provider, str):
+                provider = infer_provider(provider)
+            self.client = provider.client
+        elif cohere_client is not None:
             assert http_client is None, 'Cannot provide both `cohere_client` and `http_client`'
             assert api_key is None, 'Cannot provide both `cohere_client` and `api_key`'
             self.client = cohere_client
         else:
-            self.client = AsyncClientV2(api_key=api_key, httpx_client=http_client)
+            self.client = AsyncClientV2(api_key=api_key, httpx_client=http_client or cached_async_http_client())
 
     @property
     def base_url(self) -> str:
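
The overloads above make `provider` the preferred configuration path while keeping the `api_key`/`cohere_client`/`http_client` form on a `@deprecated` overload. A sketch of both styles, assuming the signatures shown; the model name and key are illustrative:

    from pydantic_ai.models.cohere import CohereModel
    from pydantic_ai.providers.cohere import CohereProvider

    # new style: the provider (string or instance) supplies the AsyncClientV2
    model = CohereModel('command-r-plus', provider=CohereProvider(api_key='your-api-key'))

    # old style still works but resolves to the @deprecated overload
    legacy = CohereModel('command-r-plus', api_key='your-api-key')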

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/groq.py

@@ -41,7 +41,7 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install `groq` to use the Groq model, '
-
+        'you can use the `groq` optional group — `pip install "pydantic-ai-slim[groq]"`'
     ) from _import_error
 
 

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/instrumented.py

@@ -52,7 +52,9 @@ class InstrumentationSettings:
 
     - `Agent(instrument=...)`
     - [`Agent.instrument_all()`][pydantic_ai.agent.Agent.instrument_all]
-    - `InstrumentedModel`
+    - [`InstrumentedModel`][pydantic_ai.models.instrumented.InstrumentedModel]
+
+    See the [Debugging and Monitoring guide](https://ai.pydantic.dev/logfire/) for more info.
     """
 
     tracer: Tracer = field(repr=False)
@@ -94,9 +96,13 @@ GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model'
 
 @dataclass
 class InstrumentedModel(WrapperModel):
-    """Model which
+    """Model which wraps another model so that requests are instrumented with OpenTelemetry.
+
+    See the [Debugging and Monitoring guide](https://ai.pydantic.dev/logfire/) for more info.
+    """
 
-
+    settings: InstrumentationSettings
+    """Configuration for instrumenting requests."""
 
     def __init__(
         self,
@@ -104,7 +110,7 @@ class InstrumentedModel(WrapperModel):
         options: InstrumentationSettings | None = None,
     ) -> None:
         super().__init__(wrapped)
-        self.
+        self.settings = options or InstrumentationSettings()
 
     async def request(
         self,
@@ -112,7 +118,7 @@ class InstrumentedModel(WrapperModel):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> tuple[ModelResponse, Usage]:
-        with self._instrument(messages, model_settings) as finish:
+        with self._instrument(messages, model_settings, model_request_parameters) as finish:
             response, usage = await super().request(messages, model_settings, model_request_parameters)
             finish(response, usage)
             return response, usage
@@ -124,7 +130,7 @@ class InstrumentedModel(WrapperModel):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> AsyncIterator[StreamedResponse]:
-        with self._instrument(messages, model_settings) as finish:
+        with self._instrument(messages, model_settings, model_request_parameters) as finish:
             response_stream: StreamedResponse | None = None
             try:
                 async with super().request_stream(
@@ -140,6 +146,7 @@ class InstrumentedModel(WrapperModel):
         self,
         messages: list[ModelMessage],
         model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
     ) -> Iterator[Callable[[ModelResponse, Usage], None]]:
         operation = 'chat'
         span_name = f'{operation} {self.model_name}'
@@ -149,6 +156,13 @@ class InstrumentedModel(WrapperModel):
         attributes: dict[str, AttributeValue] = {
             'gen_ai.operation.name': operation,
             **self.model_attributes(self.wrapped),
+            'model_request_parameters': json.dumps(InstrumentedModel.serialize_any(model_request_parameters)),
+            'logfire.json_schema': json.dumps(
+                {
+                    'type': 'object',
+                    'properties': {'model_request_parameters': {'type': 'object'}},
+                }
+            ),
         }
 
         if model_settings:
@@ -156,7 +170,7 @@ class InstrumentedModel(WrapperModel):
             if isinstance(value := model_settings.get(key), (float, int)):
                 attributes[f'gen_ai.request.{key}'] = value
 
-        with self.
+        with self.settings.tracer.start_as_current_span(span_name, attributes=attributes) as span:
 
             def finish(response: ModelResponse, usage: Usage):
                 if not span.is_recording():
@@ -190,9 +204,9 @@ class InstrumentedModel(WrapperModel):
         yield finish
 
     def _emit_events(self, span: Span, events: list[Event]) -> None:
-        if self.
+        if self.settings.event_mode == 'logs':
             for event in events:
-                self.
+                self.settings.event_logger.emit(event)
         else:
             attr_name = 'events'
             span.set_attributes(
@@ -201,7 +215,10 @@ class InstrumentedModel(WrapperModel):
                 'logfire.json_schema': json.dumps(
                     {
                         'type': 'object',
-                        'properties': {
+                        'properties': {
+                            attr_name: {'type': 'array'},
+                            'model_request_parameters': {'type': 'object'},
+                        },
                     }
                 ),
             }
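
The rename to a single `settings: InstrumentationSettings` attribute is what the `self.settings.tracer` and `self.settings.event_logger` call sites above rely on. A sketch of wrapping a model, assuming the constructor shown (`options` may be omitted, in which case defaults are created):

    from pydantic_ai.models.instrumented import InstrumentationSettings, InstrumentedModel

    settings = InstrumentationSettings()  # as in `options or InstrumentationSettings()`
    model = InstrumentedModel('openai:gpt-4o', settings)  # wrapped model inferred from the name
    assert model.settings is settings  # tracer, event_logger and event_mode are read from here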

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/mistral.py

@@ -75,7 +75,7 @@ try:
 except ImportError as e:
     raise ImportError(
         'Please install `mistral` to use the Mistral model, '
-
+        'you can use the `mistral` optional group — `pip install "pydantic-ai-slim[mistral]"`'
     ) from e
 
 LatestMistralModelNames = Literal[

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/openai.py

@@ -57,7 +57,7 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install `openai` to use the OpenAI model, '
-
+        'you can use the `openai` optional group — `pip install "pydantic-ai-slim[openai]"`'
     ) from _import_error
 
 OpenAIModelName = Union[str, ChatModel]
@@ -99,7 +99,7 @@ class OpenAIModel(Model):
     system_prompt_role: OpenAISystemPromptRole | None = field(default=None)
 
     _model_name: OpenAIModelName = field(repr=False)
-    _system: str = field(repr=False)
+    _system: str = field(default='openai', repr=False)
 
     @overload
     def __init__(
@@ -108,7 +108,6 @@ class OpenAIModel(Model):
         *,
         provider: Literal['openai', 'deepseek', 'azure'] | Provider[AsyncOpenAI] = 'openai',
         system_prompt_role: OpenAISystemPromptRole | None = None,
-        system: str = 'openai',
     ) -> None: ...
 
     @deprecated('Use the `provider` parameter instead of `base_url`, `api_key`, `openai_client` and `http_client`.')
@@ -123,7 +122,6 @@ class OpenAIModel(Model):
         openai_client: AsyncOpenAI | None = None,
         http_client: AsyncHTTPClient | None = None,
         system_prompt_role: OpenAISystemPromptRole | None = None,
-        system: str = 'openai',
     ) -> None: ...
 
     def __init__(
@@ -136,7 +134,6 @@ class OpenAIModel(Model):
         openai_client: AsyncOpenAI | None = None,
         http_client: AsyncHTTPClient | None = None,
         system_prompt_role: OpenAISystemPromptRole | None = None,
-        system: str = 'openai',
     ):
         """Initialize an OpenAI model.
 
@@ -155,8 +152,6 @@ class OpenAIModel(Model):
             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
             system_prompt_role: The role to use for the system prompt message. If not provided, defaults to `'system'`.
                 In the future, this may be inferred from the model name.
-            system: The model provider used, defaults to `openai`. This is for observability purposes, you must
-                customize the `base_url` and `api_key` to use a different provider.
         """
         self._model_name = model_name
 
@@ -185,7 +180,6 @@ class OpenAIModel(Model):
         else:
             self.client = AsyncOpenAI(base_url=base_url, api_key=api_key, http_client=cached_async_http_client())
         self.system_prompt_role = system_prompt_role
-        self._system = system
 
     @property
     def base_url(self) -> str:
@@ -279,7 +273,7 @@ class OpenAIModel(Model):
             tool_choice=tool_choice or NOT_GIVEN,
             stream=stream,
             stream_options={'include_usage': True} if stream else NOT_GIVEN,
-
+            max_completion_tokens=model_settings.get('max_tokens', NOT_GIVEN),
             temperature=model_settings.get('temperature', NOT_GIVEN),
             top_p=model_settings.get('top_p', NOT_GIVEN),
             timeout=model_settings.get('timeout', NOT_GIVEN),
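
The final hunk sends the `max_tokens` model setting to the API as `max_completion_tokens`. A sketch of supplying that setting, assuming `run_sync` forwards `model_settings` as in this release; the prompt and limit are illustrative:

    from pydantic_ai import Agent

    agent = Agent('openai:gpt-4o')
    # 'max_tokens' here reaches the OpenAI request as max_completion_tokens
    result = agent.run_sync('Summarise the plot of Hamlet.', model_settings={'max_tokens': 256})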

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/vertexai.py

@@ -27,7 +27,7 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install `google-auth` to use the VertexAI model, '
-
+        'you can use the `vertexai` optional group — `pip install "pydantic-ai-slim[vertexai]"`'
     ) from _import_error
 
 VERTEX_AI_URL_TEMPLATE = (

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/models/wrapper.py

@@ -13,9 +13,13 @@ from . import KnownModelName, Model, ModelRequestParameters, StreamedResponse, infer_model
 
 @dataclass(init=False)
 class WrapperModel(Model):
-    """Model which wraps another model.
+    """Model which wraps another model.
+
+    Does nothing on its own, used as a base class.
+    """
 
     wrapped: Model
+    """The underlying model being wrapped."""
 
     def __init__(self, wrapped: Model | KnownModelName):
         self.wrapped = infer_model(wrapped)

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/__init__.py

@@ -77,5 +77,9 @@ def infer_provider(provider: str) -> Provider[Any]:
         from .mistral import MistralProvider
 
         return MistralProvider()
+    elif provider == 'cohere':
+        from .cohere import CohereProvider
+
+        return CohereProvider()
     else:  # pragma: no cover
         raise ValueError(f'Unknown provider: {provider}')

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/anthropic.py

@@ -12,7 +12,7 @@ try:
 except ImportError as _import_error:  # pragma: no cover
     raise ImportError(
         'Please install the `anthropic` package to use the Anthropic provider, '
-
+        'you can use the `anthropic` optional group — `pip install "pydantic-ai-slim[anthropic]"`'
     ) from _import_error
 
 

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/azure.py

@@ -13,7 +13,7 @@ try:
 except ImportError as _import_error:  # pragma: no cover
     raise ImportError(
         'Please install the `openai` package to use the Azure provider, '
-
+        'you can use the `openai` optional group — `pip install "pydantic-ai-slim[openai]"`'
     ) from _import_error
 
 

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/bedrock.py

@@ -11,7 +11,7 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install the `boto3` package to use the Bedrock provider, '
-
+        'you can use the `bedrock` optional group — `pip install "pydantic-ai-slim[bedrock]"`'
     ) from _import_error
 
 

pydantic_ai_slim-0.0.44/pydantic_ai/providers/cohere.py

@@ -0,0 +1,72 @@
+from __future__ import annotations as _annotations
+
+import os
+
+from httpx import AsyncClient as AsyncHTTPClient
+
+from pydantic_ai.models import cached_async_http_client
+
+try:
+    from cohere import AsyncClientV2
+except ImportError as _import_error:  # pragma: no cover
+    raise ImportError(
+        'Please install the `cohere` package to use the Cohere provider, '
+        'you can use the `cohere` optional group — `pip install "pydantic-ai-slim[cohere]"`'
+    ) from _import_error
+
+
+from . import Provider
+
+
+class CohereProvider(Provider[AsyncClientV2]):
+    """Provider for Cohere API."""
+
+    @property
+    def name(self) -> str:
+        return 'cohere'
+
+    @property
+    def base_url(self) -> str:
+        client_wrapper = self.client._client_wrapper  # type: ignore
+        return str(client_wrapper.get_base_url())
+
+    @property
+    def client(self) -> AsyncClientV2:
+        return self._client
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        cohere_client: AsyncClientV2 | None = None,
+        http_client: AsyncHTTPClient | None = None,
+    ) -> None:
+        """Create a new Cohere provider.
+
+        Args:
+            api_key: The API key to use for authentication, if not provided, the `CO_API_KEY` environment variable
+                will be used if available.
+            cohere_client: An existing
+                [AsyncClientV2](https://github.com/cohere-ai/cohere-python)
+                client to use. If provided, `api_key` and `http_client` must be `None`.
+            http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
+        """
+        if cohere_client is not None:
+            assert http_client is None, 'Cannot provide both `cohere_client` and `http_client`'
+            assert api_key is None, 'Cannot provide both `cohere_client` and `api_key`'
+            self._client = cohere_client
+        else:
+            api_key = api_key or os.environ.get('CO_API_KEY')
+            if api_key is None:
+                raise ValueError(
+                    'Set the `CO_API_KEY` environment variable or pass it via `CohereProvider(api_key=...)`'
+                    'to use the Cohere provider.'
+                )
+
+            base_url = os.environ.get('CO_BASE_URL')
+            if http_client is not None:
+                self._client = AsyncClientV2(api_key=api_key, httpx_client=http_client, base_url=base_url)
+            else:
+                self._client = AsyncClientV2(
+                    api_key=api_key, httpx_client=cached_async_http_client(), base_url=base_url
+                )
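
A usage sketch for the new provider, following the constructor above; the key value is illustrative:

    from pydantic_ai.providers.cohere import CohereProvider

    provider = CohereProvider(api_key='your-api-key')  # or rely on the CO_API_KEY env var
    print(provider.name)      # 'cohere'
    print(provider.base_url)  # resolved from the underlying AsyncClientV2
    # CO_BASE_URL, if set, is passed to AsyncClientV2 as its base_url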

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/deepseek.py

@@ -13,7 +13,7 @@ try:
 except ImportError as _import_error:  # pragma: no cover
     raise ImportError(
         'Please install the `openai` package to use the DeepSeek provider, '
-
+        'you can use the `openai` optional group — `pip install "pydantic-ai-slim[openai]"`'
     ) from _import_error
 
 from . import Provider

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/google_vertex.py

@@ -2,7 +2,6 @@ from __future__ import annotations as _annotations
 
 import functools
 from collections.abc import AsyncGenerator, Mapping
-from datetime import datetime, timedelta
 from pathlib import Path
 from typing import Literal, overload
 
@@ -22,15 +21,12 @@ try:
 except ImportError as _import_error:
     raise ImportError(
         'Please install the `google-auth` package to use the Google Vertex AI provider, '
-
+        'you can use the `vertexai` optional group — `pip install "pydantic-ai-slim[vertexai]"`'
     ) from _import_error
 
 
 __all__ = ('GoogleVertexProvider',)
 
-# default expiry is 3600 seconds
-MAX_TOKEN_AGE = timedelta(seconds=3000)
-
 
 class GoogleVertexProvider(Provider[httpx.AsyncClient]):
     """Provider for Vertex AI API."""
@@ -131,19 +127,21 @@ class _VertexAIAuth(httpx.Auth):
         self.region = region
 
         self.credentials = None
-        self.token_created: datetime | None = None
 
     async def async_auth_flow(self, request: httpx.Request) -> AsyncGenerator[httpx.Request, httpx.Response]:
         if self.credentials is None:
             self.credentials = await self._get_credentials()
-        if self.credentials.token is None
-            await
-            self.token_created = datetime.now()
+        if self.credentials.token is None:  # type: ignore[reportUnknownMemberType]
+            await self._refresh_token()
         request.headers['Authorization'] = f'Bearer {self.credentials.token}'  # type: ignore[reportUnknownMemberType]
-
         # NOTE: This workaround is in place because we might get the project_id from the credentials.
         request.url = httpx.URL(str(request.url).replace('projects/None', f'projects/{self.project_id}'))
-        yield request
+        response = yield request
+
+        if response.status_code == 401:
+            await self._refresh_token()
+            request.headers['Authorization'] = f'Bearer {self.credentials.token}'  # type: ignore[reportUnknownMemberType]
+            yield request
 
     async def _get_credentials(self) -> BaseCredentials | ServiceAccountCredentials:
         if self.service_account_file is not None:
@@ -166,15 +164,9 @@ class _VertexAIAuth(httpx.Auth):
         self.project_id = creds_project_id
         return creds
 
-    def
-        if self.token_created is None:
-            return True
-        else:
-            return (datetime.now() - self.token_created) > MAX_TOKEN_AGE
-
-    def _refresh_token(self) -> str:  # pragma: no cover
+    async def _refresh_token(self) -> str:  # pragma: no cover
         assert self.credentials is not None
-        self.credentials.refresh
+        await anyio.to_thread.run_sync(self.credentials.refresh, Request())  # type: ignore[reportUnknownMemberType]
         assert isinstance(self.credentials.token, str), f'Expected token to be a string, got {self.credentials.token}'  # type: ignore[reportUnknownMemberType]
         return self.credentials.token

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/groq.py

@@ -12,7 +12,7 @@ try:
 except ImportError as _import_error:  # pragma: no cover
     raise ImportError(
         'Please install the `groq` package to use the Groq provider, '
-
+        'you can use the `groq` optional group — `pip install "pydantic-ai-slim[groq]"`'
     ) from _import_error
 
 

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/mistral.py

@@ -12,7 +12,7 @@ try:
 except ImportError as e:  # pragma: no cover
     raise ImportError(
         'Please install the `mistral` package to use the Mistral provider, '
-
+        'you can use the `mistral` optional group — `pip install "pydantic-ai-slim[mistral]"`'
     ) from e
 
 

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pydantic_ai/providers/openai.py

@@ -11,7 +11,7 @@ try:
 except ImportError as _import_error:  # pragma: no cover
     raise ImportError(
         'Please install the `openai` package to use the OpenAI provider, '
-
+        'you can use the `openai` optional group — `pip install "pydantic-ai-slim[openai]"`'
     ) from _import_error
 
 

{pydantic_ai_slim-0.0.42 → pydantic_ai_slim-0.0.44}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai-slim"
-version = "0.0.42"
+version = "0.0.44"
 description = "Agent Framework / shim to use Pydantic with LLMs, slim package"
 authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
 license = "MIT"
@@ -36,7 +36,7 @@ dependencies = [
     "griffe>=1.3.2",
     "httpx>=0.27",
     "pydantic>=2.10",
-    "pydantic-graph==0.0.42",
+    "pydantic-graph==0.0.44",
     "exceptiongroup; python_version < '3.11'",
     "opentelemetry-api>=1.28.0",
     "typing-inspection>=0.4.0",
@@ -46,7 +46,7 @@ dependencies = [
 # WARNING if you add optional groups, please update docs/install.md
 logfire = ["logfire>=2.3"]
 # Models
-openai = ["openai>=1.
+openai = ["openai>=1.67.0"]
 cohere = ["cohere>=5.13.11"]
 vertexai = ["google-auth>=2.36.0", "requests>=2.32.3"]
 anthropic = ["anthropic>=0.49.0"]