pydantic-ai-slim 0.0.36__tar.gz → 0.0.37__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/PKG-INFO +2 -2
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/agent.py +18 -4
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/messages.py +3 -1
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/__init__.py +5 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/anthropic.py +4 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/bedrock.py +4 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/cohere.py +5 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/fallback.py +4 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/gemini.py +3 -2
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/groq.py +4 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/instrumented.py +10 -2
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/mistral.py +4 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/openai.py +4 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/providers/google_vertex.py +49 -4
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pyproject.toml +2 -2
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/.gitignore +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/README.md +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/_result.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/vertexai.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/usage.py +0 -0
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.0.36
+Version: 0.0.37
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.0.36
+Requires-Dist: pydantic-graph==0.0.37
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/agent.py

@@ -922,6 +922,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         self,
         /,
         *,
+        name: str | None = None,
         retries: int | None = None,
         prepare: ToolPrepareFunc[AgentDepsT] | None = None,
         docstring_format: DocstringFormat = 'auto',
@@ -933,6 +934,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         func: ToolFuncContext[AgentDepsT, ToolParams] | None = None,
         /,
         *,
+        name: str | None = None,
         retries: int | None = None,
         prepare: ToolPrepareFunc[AgentDepsT] | None = None,
         docstring_format: DocstringFormat = 'auto',
@@ -969,6 +971,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
 
         Args:
             func: The tool function to register.
+            name: The name of the tool, defaults to the function name.
             retries: The number of retries to allow for this tool, defaults to the agent's default retries,
                 which defaults to 1.
             prepare: custom method to prepare the tool definition for each step, return `None` to omit this
@@ -984,13 +987,17 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
                 func_: ToolFuncContext[AgentDepsT, ToolParams],
             ) -> ToolFuncContext[AgentDepsT, ToolParams]:
                 # noinspection PyTypeChecker
-                self._register_function(func_, True, retries, prepare, docstring_format, require_parameter_descriptions)
+                self._register_function(
+                    func_, True, name, retries, prepare, docstring_format, require_parameter_descriptions
+                )
                 return func_
 
             return tool_decorator
         else:
             # noinspection PyTypeChecker
-            self._register_function(func, True, retries, prepare, docstring_format, require_parameter_descriptions)
+            self._register_function(
+                func, True, name, retries, prepare, docstring_format, require_parameter_descriptions
+            )
             return func
 
     @overload
@@ -1001,6 +1008,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         self,
         /,
         *,
+        name: str | None = None,
         retries: int | None = None,
         prepare: ToolPrepareFunc[AgentDepsT] | None = None,
         docstring_format: DocstringFormat = 'auto',
@@ -1012,6 +1020,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         func: ToolFuncPlain[ToolParams] | None = None,
         /,
         *,
+        name: str | None = None,
         retries: int | None = None,
         prepare: ToolPrepareFunc[AgentDepsT] | None = None,
         docstring_format: DocstringFormat = 'auto',
@@ -1048,6 +1057,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
 
         Args:
             func: The tool function to register.
+            name: The name of the tool, defaults to the function name.
             retries: The number of retries to allow for this tool, defaults to the agent's default retries,
                 which defaults to 1.
             prepare: custom method to prepare the tool definition for each step, return `None` to omit this
@@ -1062,19 +1072,22 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
             def tool_decorator(func_: ToolFuncPlain[ToolParams]) -> ToolFuncPlain[ToolParams]:
                 # noinspection PyTypeChecker
                 self._register_function(
-                    func_, False, retries, prepare, docstring_format, require_parameter_descriptions
+                    func_, False, name, retries, prepare, docstring_format, require_parameter_descriptions
                 )
                 return func_
 
             return tool_decorator
         else:
-            self._register_function(func, False, retries, prepare, docstring_format, require_parameter_descriptions)
+            self._register_function(
+                func, False, name, retries, prepare, docstring_format, require_parameter_descriptions
+            )
             return func
 
     def _register_function(
         self,
         func: ToolFuncEither[AgentDepsT, ToolParams],
         takes_ctx: bool,
+        name: str | None,
         retries: int | None,
         prepare: ToolPrepareFunc[AgentDepsT] | None,
         docstring_format: DocstringFormat,
@@ -1085,6 +1098,7 @@ class Agent(Generic[AgentDepsT, ResultDataT]):
         tool = Tool[AgentDepsT](
             func,
             takes_ctx=takes_ctx,
+            name=name,
             max_retries=retries_,
             prepare=prepare,
             docstring_format=docstring_format,
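The agent.py changes add an optional `name` argument to `@agent.tool` and `@agent.tool_plain`, threaded through `_register_function` into the `Tool` constructor. A brief usage sketch (the model string and tool bodies below are illustrative, not taken from the package):

```python
from pydantic_ai import Agent, RunContext

agent = Agent('openai:gpt-4o', deps_type=str)


@agent.tool(name='lookup_user')  # exposed to the model as 'lookup_user', not '_lookup'
async def _lookup(ctx: RunContext[str], user_id: int) -> str:
    """Look up a user by ID in the configured backend."""
    return f'{ctx.deps}:{user_id}'


@agent.tool_plain(name='add_numbers')  # previously the tool name was always the function name
def _add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b
```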
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/messages.py

@@ -395,7 +395,9 @@ class ModelResponse:
 ModelMessage = Annotated[Union[ModelRequest, ModelResponse], pydantic.Discriminator('kind')]
 """Any message sent to or returned by a model."""
 
-ModelMessagesTypeAdapter = pydantic.TypeAdapter(list[ModelMessage], config=pydantic.ConfigDict(defer_build=True))
+ModelMessagesTypeAdapter = pydantic.TypeAdapter(
+    list[ModelMessage], config=pydantic.ConfigDict(defer_build=True, ser_json_bytes='base64')
+)
 """Pydantic [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] for (de)serializing messages."""
 
 
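The functional change to `ModelMessagesTypeAdapter` is `ser_json_bytes='base64'`, so `bytes` values in serialized message history are rendered as base64 text in JSON. A stand-alone illustration of what that pydantic config flag does, applied to a bare `bytes` type rather than the real message classes:

```python
from pydantic import ConfigDict, TypeAdapter

# same flag as in the ModelMessagesTypeAdapter config above
ta = TypeAdapter(bytes, config=ConfigDict(ser_json_bytes='base64'))

print(ta.dump_json(b'hello'))    # bytes are emitted as base64 text instead of being decoded as UTF-8
print(ta.dump_python(b'hello'))  # b'hello' -- Python-mode serialization is unaffected
```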
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/__init__.py

@@ -266,6 +266,11 @@ class Model(ABC):
         """The system / model provider, ex: openai."""
         raise NotImplementedError()
 
+    @property
+    def base_url(self) -> str | None:
+        """The base URL for the provider API, if available."""
+        return None
+
 
 @dataclass
 class StreamedResponse(ABC):
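The new optional `Model.base_url` property defaults to `None` and is overridden by the provider-backed models in the hunks below. A quick sketch of reading it; this assumes `OPENAI_API_KEY` is set so the OpenAI client can be constructed, and the printed URL is only indicative:

```python
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.models.test import TestModel

print(OpenAIModel('gpt-4o').base_url)  # e.g. 'https://api.openai.com/v1/'
print(TestModel().base_url)            # None -- TestModel keeps the default from Model.base_url
```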
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/anthropic.py

@@ -143,6 +143,10 @@ class AnthropicModel(Model):
         else:
             self.client = AsyncAnthropic(api_key=api_key, http_client=cached_async_http_client())
 
+    @property
+    def base_url(self) -> str:
+        return str(self.client.base_url)
+
     async def request(
         self,
         messages: list[ModelMessage],
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/cohere.py

@@ -127,6 +127,11 @@ class CohereModel(Model):
         else:
             self.client = AsyncClientV2(api_key=api_key, httpx_client=http_client)
 
+    @property
+    def base_url(self) -> str:
+        client_wrapper = self.client._client_wrapper  # type: ignore
+        return str(client_wrapper.get_base_url())
+
     async def request(
         self,
         messages: list[ModelMessage],
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/fallback.py

@@ -106,6 +106,10 @@ class FallbackModel(Model):
         """The system / model provider, n/a for fallback models."""
         return None
 
+    @property
+    def base_url(self) -> str | None:
+        return self.models[0].base_url
+
 
 def _default_fallback_condition_factory(exceptions: tuple[type[Exception], ...]) -> Callable[[Exception], bool]:
     """Create a default fallback condition for the given exceptions."""
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/gemini.py

@@ -143,6 +143,7 @@ class GeminiModel(Model):
             else:
                 self._system = provider.name
                 self.client = provider.client
+                self._url = str(self.client.base_url)
         else:
             if api_key is None:
                 if env_api_key := os.getenv('GEMINI_API_KEY'):
@@ -159,7 +160,7 @@ class GeminiModel(Model):
         return self._auth
 
     @property
-    def url(self) -> str:
+    def base_url(self) -> str:
         assert self._url is not None, 'URL not initialized'
         return self._url
 
@@ -257,7 +258,7 @@ class GeminiModel(Model):
             'User-Agent': get_user_agent(),
         }
         if self._provider is None:  # pragma: no cover
-            url = self.url + ('streamGenerateContent' if streamed else 'generateContent')
+            url = self.base_url + ('streamGenerateContent' if streamed else 'generateContent')
             headers.update(await self.auth.headers())
         else:
             url = f'/{self._model_name}:{"streamGenerateContent" if streamed else "generateContent"}'
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/groq.py

@@ -123,6 +123,10 @@ class GroqModel(Model):
         else:
             self.client = AsyncGroq(api_key=api_key, http_client=cached_async_http_client())
 
+    @property
+    def base_url(self) -> str:
+        return str(self.client.base_url)
+
     async def request(
         self,
         messages: list[ModelMessage],
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/models/instrumented.py

@@ -5,6 +5,7 @@ from collections.abc import AsyncIterator, Iterator, Mapping
 from contextlib import asynccontextmanager, contextmanager
 from dataclasses import dataclass, field
 from typing import Any, Callable, Literal
+from urllib.parse import urlparse
 
 from opentelemetry._events import Event, EventLogger, EventLoggerProvider, get_event_logger_provider
 from opentelemetry.trace import Span, Tracer, TracerProvider, get_tracer_provider
@@ -142,8 +143,6 @@ class InstrumentedModel(WrapperModel):
         system = getattr(self.wrapped, 'system', '') or self.wrapped.__class__.__name__.removesuffix('Model').lower()
         system = {'google-gla': 'gemini', 'google-vertex': 'vertex_ai', 'mistral': 'mistral_ai'}.get(system, system)
         # TODO Missing attributes:
-        # - server.address: requires a Model.base_url abstract method or similar
-        # - server.port: to parse from the base_url
         # - error.type: unclear if we should do something here or just always rely on span exceptions
         # - gen_ai.request.stop_sequences/top_k: model_settings doesn't include these
         attributes: dict[str, AttributeValue] = {
@@ -151,6 +150,15 @@ class InstrumentedModel(WrapperModel):
             'gen_ai.system': system,
             'gen_ai.request.model': model_name,
         }
+        if base_url := self.wrapped.base_url:
+            try:
+                parsed = urlparse(base_url)
+                if parsed.hostname:
+                    attributes['server.address'] = parsed.hostname
+                if parsed.port:
+                    attributes['server.port'] = parsed.port
+            except Exception:  # pragma: no cover
+                pass
 
         if model_settings:
             for key in MODEL_SETTING_ATTRIBUTES:
|
|
|
140
140
|
api_key = os.getenv('MISTRAL_API_KEY') if api_key is None else api_key
|
|
141
141
|
self.client = Mistral(api_key=api_key, async_client=http_client or cached_async_http_client())
|
|
142
142
|
|
|
143
|
+
@property
|
|
144
|
+
def base_url(self) -> str:
|
|
145
|
+
return str(self.client.sdk_configuration.get_server_details()[0])
|
|
146
|
+
|
|
143
147
|
async def request(
|
|
144
148
|
self,
|
|
145
149
|
messages: list[ModelMessage],
|
|
@@ -187,6 +187,10 @@ class OpenAIModel(Model):
|
|
|
187
187
|
self.system_prompt_role = system_prompt_role
|
|
188
188
|
self._system = system
|
|
189
189
|
|
|
190
|
+
@property
|
|
191
|
+
def base_url(self) -> str:
|
|
192
|
+
return str(self.client.base_url)
|
|
193
|
+
|
|
190
194
|
async def request(
|
|
191
195
|
self,
|
|
192
196
|
messages: list[ModelMessage],
|
|
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pydantic_ai/providers/google_vertex.py

@@ -1,10 +1,10 @@
 from __future__ import annotations as _annotations
 
 import functools
-from collections.abc import AsyncGenerator
+from collections.abc import AsyncGenerator, Mapping
 from datetime import datetime, timedelta
 from pathlib import Path
-from typing import Literal
+from typing import Literal, overload
 
 import anyio.to_thread
 import httpx
@@ -52,19 +52,45 @@ class GoogleVertexProvider(Provider[httpx.AsyncClient]):
     def client(self) -> httpx.AsyncClient:
         return self._client
 
+    @overload
     def __init__(
         self,
+        *,
         service_account_file: Path | str | None = None,
         project_id: str | None = None,
         region: VertexAiRegion = 'us-central1',
         model_publisher: str = 'google',
         http_client: httpx.AsyncClient | None = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        *,
+        service_account_info: Mapping[str, str] | None = None,
+        project_id: str | None = None,
+        region: VertexAiRegion = 'us-central1',
+        model_publisher: str = 'google',
+        http_client: httpx.AsyncClient | None = None,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        service_account_file: Path | str | None = None,
+        service_account_info: Mapping[str, str] | None = None,
+        project_id: str | None = None,
+        region: VertexAiRegion = 'us-central1',
+        model_publisher: str = 'google',
+        http_client: httpx.AsyncClient | None = None,
     ) -> None:
         """Create a new Vertex AI provider.
 
         Args:
             service_account_file: Path to a service account file.
-                If not provided, the default environment credentials will be used.
+                If not provided, the service_account_info or default environment credentials will be used.
+            service_account_info: The loaded service_account_file contents.
+                If not provided, the service_account_file or default environment credentials will be used.
             project_id: The project ID to use, if not provided it will be taken from the credentials.
             region: The region to make requests to.
             model_publisher: The model publisher to use, I couldn't find a good list of available publishers,
@@ -73,13 +99,17 @@ class GoogleVertexProvider(Provider[httpx.AsyncClient]):
                 Please create an issue or PR if you know how to use other publishers.
             http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.
         """
+        if service_account_file and service_account_info:
+            raise ValueError('Only one of `service_account_file` or `service_account_info` can be provided.')
+
         self._client = http_client or cached_async_http_client()
         self.service_account_file = service_account_file
+        self.service_account_info = service_account_info
         self.project_id = project_id
         self.region = region
         self.model_publisher = model_publisher
 
-        self._client.auth = _VertexAIAuth(service_account_file, project_id, region)
+        self._client.auth = _VertexAIAuth(service_account_file, service_account_info, project_id, region)
         self._client.base_url = self.base_url
 
 
@@ -91,10 +121,12 @@ class _VertexAIAuth(httpx.Auth):
     def __init__(
         self,
         service_account_file: Path | str | None = None,
+        service_account_info: Mapping[str, str] | None = None,
         project_id: str | None = None,
         region: VertexAiRegion = 'us-central1',
     ) -> None:
         self.service_account_file = service_account_file
+        self.service_account_info = service_account_info
         self.project_id = project_id
         self.region = region
 
@@ -119,6 +151,11 @@ class _VertexAIAuth(httpx.Auth):
             assert creds.project_id is None or isinstance(creds.project_id, str)  # type: ignore[reportUnknownMemberType]
             creds_project_id: str | None = creds.project_id
             creds_source = 'service account file'
+        elif self.service_account_info is not None:
+            creds = await _creds_from_info(self.service_account_info)
+            assert creds.project_id is None or isinstance(creds.project_id, str)  # type: ignore[reportUnknownMemberType]
+            creds_project_id: str | None = creds.project_id
+            creds_source = 'service account info'
         else:
             creds, creds_project_id = await _async_google_auth()
             creds_source = '`google.auth.default()`'
@@ -154,6 +191,14 @@ async def _creds_from_file(service_account_file: str | Path) -> ServiceAccountCredentials:
     return await anyio.to_thread.run_sync(service_account_credentials_from_file, str(service_account_file))
 
 
+async def _creds_from_info(service_account_info: Mapping[str, str]) -> ServiceAccountCredentials:
+    service_account_credentials_from_string = functools.partial(
+        ServiceAccountCredentials.from_service_account_info,  # type: ignore[reportUnknownMemberType]
+        scopes=['https://www.googleapis.com/auth/cloud-platform'],
+    )
+    return await anyio.to_thread.run_sync(service_account_credentials_from_string, service_account_info)
+
+
 VertexAiRegion = Literal[
     'asia-east1',
     'asia-east2',
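Net effect: `GoogleVertexProvider` now accepts already-loaded service-account JSON via `service_account_info` as an alternative to `service_account_file`, and passing both raises `ValueError`. A hedged usage sketch (the file name is a placeholder, not from the release):

```python
import json
from pathlib import Path

from pydantic_ai.providers.google_vertex import GoogleVertexProvider

# e.g. credentials fetched from a secret manager rather than read from disk
service_account_info = json.loads(Path('service-account.json').read_text())

provider = GoogleVertexProvider(service_account_info=service_account_info, region='us-central1')

# Supplying both sources is rejected:
# GoogleVertexProvider(service_account_file='sa.json', service_account_info=service_account_info)
# -> ValueError: Only one of `service_account_file` or `service_account_info` can be provided.
```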
{pydantic_ai_slim-0.0.36 → pydantic_ai_slim-0.0.37}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai-slim"
-version = "0.0.36"
+version = "0.0.37"
 description = "Agent Framework / shim to use Pydantic with LLMs, slim package"
 authors = [{ name = "Samuel Colvin", email = "samuel@pydantic.dev" }]
 license = "MIT"
@@ -36,7 +36,7 @@ dependencies = [
     "griffe>=1.3.2",
     "httpx>=0.27",
     "pydantic>=2.10",
-    "pydantic-graph==0.0.36",
+    "pydantic-graph==0.0.37",
     "exceptiongroup; python_version < '3.11'",
     "opentelemetry-api>=1.28.0",
     "typing-inspection>=0.4.0",