pydantic-ai-slim 0.0.9__py3-none-any.whl → 0.0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pydantic_ai/agent.py
CHANGED
@@ -2,9 +2,11 @@ from __future__ import annotations as _annotations
 
 import asyncio
 import dataclasses
+import inspect
 from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
 from contextlib import asynccontextmanager, contextmanager
 from dataclasses import dataclass, field
+from types import FrameType
 from typing import Any, Callable, Generic, cast, final, overload
 
 import logfire_api
@@ -54,6 +56,11 @@ class Agent(Generic[AgentDeps, ResultData]):
     # dataclass fields mostly for my sanity — knowing what attributes are available
     model: models.Model | models.KnownModelName | None
     """The default model configured for this agent."""
+    name: str | None
+    """The name of the agent, used for logging.
+
+    If `None`, we try to infer the agent name from the call frame when the agent is first run.
+    """
     _result_schema: _result.ResultSchema[ResultData] | None = field(repr=False)
     _result_validators: list[_result.ResultValidator[AgentDeps, ResultData]] = field(repr=False)
     _allow_text_result: bool = field(repr=False)
@@ -79,6 +86,7 @@ class Agent(Generic[AgentDeps, ResultData]):
         result_type: type[ResultData] = str,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDeps] = NoneType,
+        name: str | None = None,
         retries: int = 1,
         result_tool_name: str = 'final_result',
         result_tool_description: str | None = None,
@@ -98,6 +106,8 @@ class Agent(Generic[AgentDeps, ResultData]):
             parameterize the agent, and therefore get the best out of static type checking.
             If you're not using deps, but want type checking to pass, you can set `deps=None` to satisfy Pyright
             or add a type hint `: Agent[None, <return type>]`.
+            name: The name of the agent, used for logging. If `None`, we try to infer the agent name from the call frame
+                when the agent is first run.
             retries: The default number of retries to allow before raising an error.
             result_tool_name: The name of the tool to use for the final result.
             result_tool_description: The description of the final result tool.
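In practice the new parameter gives two ways to get a readable name into logs: pass it explicitly, or leave it unset and let it be inferred from the variable the agent is assigned to. A minimal usage sketch follows; the model string, prompt, and agent names are illustrative assumptions (not taken from this diff), and a real run needs the corresponding API key:

from pydantic_ai import Agent

# Explicit name, used as-is in the logfire span ('support_agent run ...').
support_agent = Agent('openai:gpt-4o', name='support_agent')

# name=None (the default): inferred from the caller's frame on the first run,
# so this agent ends up named 'triage_agent'.
triage_agent = Agent('openai:gpt-4o')
triage_agent.run_sync('Which team should handle a billing question?')
print(triage_agent.name)  # -> 'triage_agent'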
@@ -115,6 +125,7 @@ class Agent(Generic[AgentDeps, ResultData]):
         else:
             self.model = models.infer_model(model)
 
+        self.name = name
         self._result_schema = _result.ResultSchema[result_type].build(
             result_type, result_tool_name, result_tool_description
         )
@@ -139,6 +150,7 @@ class Agent(Generic[AgentDeps, ResultData]):
         message_history: list[_messages.Message] | None = None,
         model: models.Model | models.KnownModelName | None = None,
         deps: AgentDeps = None,
+        infer_name: bool = True,
     ) -> result.RunResult[ResultData]:
         """Run the agent with a user prompt in async mode.
 
@@ -147,16 +159,19 @@ class Agent(Generic[AgentDeps, ResultData]):
             message_history: History of the conversation so far.
             model: Optional model to use for this run, required if `model` was not set when creating the agent.
             deps: Optional dependencies to use for this run.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
 
         Returns:
             The result of the run.
         """
+        if infer_name and self.name is None:
+            self._infer_name(inspect.currentframe())
         model_used, custom_model, agent_model = await self._get_agent_model(model)
 
         deps = self._get_deps(deps)
 
         with _logfire.span(
-            'agent run {prompt=}',
+            '{agent.name} run {prompt=}',
             prompt=user_prompt,
             agent=self,
             custom_model=custom_model,
@@ -208,6 +223,7 @@ class Agent(Generic[AgentDeps, ResultData]):
         message_history: list[_messages.Message] | None = None,
         model: models.Model | models.KnownModelName | None = None,
         deps: AgentDeps = None,
+        infer_name: bool = True,
     ) -> result.RunResult[ResultData]:
         """Run the agent with a user prompt synchronously.
 
@@ -218,12 +234,17 @@ class Agent(Generic[AgentDeps, ResultData]):
             message_history: History of the conversation so far.
             model: Optional model to use for this run, required if `model` was not set when creating the agent.
             deps: Optional dependencies to use for this run.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
 
         Returns:
             The result of the run.
         """
+        if infer_name and self.name is None:
+            self._infer_name(inspect.currentframe())
         loop = asyncio.get_event_loop()
-        return loop.run_until_complete(
+        return loop.run_until_complete(
+            self.run(user_prompt, message_history=message_history, model=model, deps=deps, infer_name=False)
+        )
 
     @asynccontextmanager
     async def run_stream(
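Note that the delegated call passes `infer_name=False`: the name has already been inferred (or the attempt has already failed) against `run_sync`'s own caller, and inferring again inside the nested call would inspect frames that belong to library internals rather than user code. A toy, synchronous illustration of that effect (not pydantic-ai code; class and variable names are invented):

from __future__ import annotations

import inspect


def caller_variable_name(obj: object) -> str | None:
    """Return the name of the caller's variable bound to `obj`, if any."""
    frame = inspect.currentframe()
    assert frame is not None and frame.f_back is not None
    return next((n for n, v in frame.f_back.f_locals.items() if v is obj), None)


class Runner:
    def run_sync(self) -> str | None:
        return self.run()  # from here on, the object is only known as `self`

    def run(self) -> str | None:
        return caller_variable_name(self)


my_runner = Runner()
print(caller_variable_name(my_runner))  # -> 'my_runner' (inferred at the user-facing call)
print(my_runner.run_sync())             # -> 'self' (inferred one level too deep)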
@@ -233,6 +254,7 @@ class Agent(Generic[AgentDeps, ResultData]):
         message_history: list[_messages.Message] | None = None,
         model: models.Model | models.KnownModelName | None = None,
         deps: AgentDeps = None,
+        infer_name: bool = True,
     ) -> AsyncIterator[result.StreamedRunResult[AgentDeps, ResultData]]:
         """Run the agent with a user prompt in async mode, returning a streamed response.
 
@@ -241,16 +263,21 @@ class Agent(Generic[AgentDeps, ResultData]):
             message_history: History of the conversation so far.
             model: Optional model to use for this run, required if `model` was not set when creating the agent.
             deps: Optional dependencies to use for this run.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
 
         Returns:
             The result of the run.
         """
+        if infer_name and self.name is None:
+            # f_back because `asynccontextmanager` adds one frame
+            if frame := inspect.currentframe():  # pragma: no branch
+                self._infer_name(frame.f_back)
         model_used, custom_model, agent_model = await self._get_agent_model(model)
 
         deps = self._get_deps(deps)
 
         with _logfire.span(
-            'agent run stream {prompt=}',
+            '{agent.name} run stream {prompt=}',
             prompt=user_prompt,
             agent=self,
             custom_model=custom_model,
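The inline comment on the added lines ("f_back because `asynccontextmanager` adds one frame") refers to the extra stack frame the contextlib machinery sits in between the decorated generator and the user's `with` block, which is why the frame passed to `_infer_name` is hopped back once first. A self-contained sketch of that extra frame, using the synchronous `contextmanager` for brevity (the async variant adds the same single frame for this purpose):

import inspect
from contextlib import contextmanager


@contextmanager
def where_am_i_called_from():
    frame = inspect.currentframe()
    assert frame is not None and frame.f_back is not None
    # frame               -> this generator's own frame
    # frame.f_back        -> contextlib's __enter__ helper (the extra frame)
    # frame.f_back.f_back -> the frame of the `with` statement below
    caller = frame.f_back.f_back
    yield caller.f_code.co_name if caller else '<unknown>'


def user_code():
    with where_am_i_called_from() as caller_name:
        print(caller_name)  # -> 'user_code'


user_code()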
@@ -798,6 +825,19 @@ class Agent(Generic[AgentDeps, ResultData]):
         else:
             return deps
 
+    def _infer_name(self, function_frame: FrameType | None) -> None:
+        """Infer the agent name from the call frame.
+
+        Usage should be `self._infer_name(inspect.currentframe())`.
+        """
+        assert self.name is None, 'Name already set'
+        if function_frame is not None:  # pragma: no branch
+            if parent_frame := function_frame.f_back:  # pragma: no branch
+                for name, item in parent_frame.f_locals.items():
+                    if item is self:
+                        self.name = name
+                        return
+
 
 @dataclass
 class _MarkFinalResult(Generic[ResultData]):
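For readers unfamiliar with the trick: `_infer_name` walks one frame up from the caller and looks for a variable in that frame that is bound to the agent object itself, then adopts that variable's name. A self-contained sketch of the same pattern outside pydantic-ai (class and variable names here are invented for illustration), including the case where no plain variable points at the object:

from __future__ import annotations

import inspect
from types import FrameType


class AutoNamed:
    name: str | None = None

    def start(self) -> None:
        # Mirrors Agent.run: hand the current frame to the inference helper.
        if self.name is None:
            self._infer_name(inspect.currentframe())

    def _infer_name(self, function_frame: FrameType | None) -> None:
        # One frame up from `start` is the code that called it; scan its
        # variables for one that is bound to this exact object.
        if function_frame is not None and (parent := function_frame.f_back) is not None:
            for name, item in parent.f_locals.items():
                if item is self:
                    self.name = name
                    return


billing_helper = AutoNamed()
billing_helper.start()
print(billing_helper.name)  # -> 'billing_helper'

# Objects never bound to a plain variable in the caller's scope keep name=None:
helpers = [AutoNamed()]
helpers[0].start()
print(helpers[0].name)  # -> None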
pydantic_ai_slim-0.0.10.dist-info/RECORD
CHANGED
@@ -4,7 +4,7 @@ pydantic_ai/_pydantic.py,sha256=oFfcHDv_wuL1NQ7mCzVHvP1HBaVzyvb7xS-_Iiri_tA,8491
 pydantic_ai/_result.py,sha256=wzcfwDpr_sro1Vkn3DkyIhCXMHTReDxL_ZYm50JzdRI,9667
 pydantic_ai/_system_prompt.py,sha256=vFT0y9Wykl5veGMgLLkGRYiHQrgdW2BZ1rwMn4izjjo,1085
 pydantic_ai/_utils.py,sha256=eNb7f3-ZQC8WDEa87iUcXGQ-lyuutFQG-5yBCMD4Vvs,8227
-pydantic_ai/agent.py,sha256=
+pydantic_ai/agent.py,sha256=dB2_JshYBjK04fmzJP79wmoKJwMuEBaLmjIRVdwrISM,36854
 pydantic_ai/exceptions.py,sha256=ko_47M0k6Rhg9mUC9P1cj7N4LCH6cC0pEsF65A2vL-U,1561
 pydantic_ai/messages.py,sha256=I0_CPXDIGGSy-PXHuKq540oAXYOO9uyylpsfSsE4vLs,7032
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -17,6 +17,6 @@ pydantic_ai/models/groq.py,sha256=Tx2yU3ysmPLBmWGsjzES-XcumzrsoBtB7spCnJBlLiM,14
 pydantic_ai/models/openai.py,sha256=5ihH25CrS0tnZNW-BZw4GyPe8V-IxIHWw3B9ulPVjQE,14931
 pydantic_ai/models/test.py,sha256=q1wch_E7TSb4qx9PCcP1YyBGZx567MGlAQhlAlON0S8,14463
 pydantic_ai/models/vertexai.py,sha256=5wI8y2YjeRgSE51uKy5OtevQkks65uEbxIUAs5EGBaI,9161
-pydantic_ai_slim-0.0.
-pydantic_ai_slim-0.0.
-pydantic_ai_slim-0.0.
+pydantic_ai_slim-0.0.10.dist-info/METADATA,sha256=2-sEVelPFDeLZwYC4o8ZgGvyXvJsVXZIkj3Mx3wXs6g,2562
+pydantic_ai_slim-0.0.10.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+pydantic_ai_slim-0.0.10.dist-info/RECORD,,
File without changes