pydantic-ai-slim 0.8.0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic.

Files changed (75)
  1. pydantic_ai/__init__.py +28 -2
  2. pydantic_ai/_a2a.py +1 -1
  3. pydantic_ai/_agent_graph.py +323 -156
  4. pydantic_ai/_function_schema.py +5 -5
  5. pydantic_ai/_griffe.py +2 -1
  6. pydantic_ai/_otel_messages.py +2 -2
  7. pydantic_ai/_output.py +31 -35
  8. pydantic_ai/_parts_manager.py +7 -5
  9. pydantic_ai/_run_context.py +3 -1
  10. pydantic_ai/_system_prompt.py +2 -2
  11. pydantic_ai/_tool_manager.py +32 -28
  12. pydantic_ai/_utils.py +14 -26
  13. pydantic_ai/ag_ui.py +82 -51
  14. pydantic_ai/agent/__init__.py +84 -17
  15. pydantic_ai/agent/abstract.py +35 -4
  16. pydantic_ai/agent/wrapper.py +6 -0
  17. pydantic_ai/builtin_tools.py +2 -2
  18. pydantic_ai/common_tools/duckduckgo.py +4 -2
  19. pydantic_ai/durable_exec/temporal/__init__.py +70 -17
  20. pydantic_ai/durable_exec/temporal/_agent.py +93 -11
  21. pydantic_ai/durable_exec/temporal/_function_toolset.py +53 -6
  22. pydantic_ai/durable_exec/temporal/_logfire.py +6 -3
  23. pydantic_ai/durable_exec/temporal/_mcp_server.py +2 -1
  24. pydantic_ai/durable_exec/temporal/_model.py +2 -2
  25. pydantic_ai/durable_exec/temporal/_run_context.py +2 -1
  26. pydantic_ai/durable_exec/temporal/_toolset.py +2 -1
  27. pydantic_ai/exceptions.py +45 -2
  28. pydantic_ai/format_prompt.py +2 -2
  29. pydantic_ai/mcp.py +15 -27
  30. pydantic_ai/messages.py +156 -44
  31. pydantic_ai/models/__init__.py +20 -7
  32. pydantic_ai/models/anthropic.py +10 -17
  33. pydantic_ai/models/bedrock.py +55 -57
  34. pydantic_ai/models/cohere.py +3 -3
  35. pydantic_ai/models/fallback.py +2 -2
  36. pydantic_ai/models/function.py +25 -23
  37. pydantic_ai/models/gemini.py +13 -14
  38. pydantic_ai/models/google.py +19 -5
  39. pydantic_ai/models/groq.py +127 -39
  40. pydantic_ai/models/huggingface.py +5 -5
  41. pydantic_ai/models/instrumented.py +49 -21
  42. pydantic_ai/models/mcp_sampling.py +3 -1
  43. pydantic_ai/models/mistral.py +8 -8
  44. pydantic_ai/models/openai.py +37 -42
  45. pydantic_ai/models/test.py +24 -4
  46. pydantic_ai/output.py +27 -32
  47. pydantic_ai/profiles/__init__.py +3 -3
  48. pydantic_ai/profiles/groq.py +1 -1
  49. pydantic_ai/profiles/openai.py +25 -4
  50. pydantic_ai/providers/__init__.py +4 -0
  51. pydantic_ai/providers/anthropic.py +2 -3
  52. pydantic_ai/providers/bedrock.py +3 -2
  53. pydantic_ai/providers/google_vertex.py +2 -1
  54. pydantic_ai/providers/groq.py +21 -2
  55. pydantic_ai/providers/litellm.py +134 -0
  56. pydantic_ai/result.py +173 -52
  57. pydantic_ai/retries.py +52 -31
  58. pydantic_ai/run.py +12 -5
  59. pydantic_ai/tools.py +127 -23
  60. pydantic_ai/toolsets/__init__.py +4 -1
  61. pydantic_ai/toolsets/_dynamic.py +4 -4
  62. pydantic_ai/toolsets/abstract.py +18 -2
  63. pydantic_ai/toolsets/approval_required.py +32 -0
  64. pydantic_ai/toolsets/combined.py +7 -12
  65. pydantic_ai/toolsets/{deferred.py → external.py} +11 -5
  66. pydantic_ai/toolsets/filtered.py +1 -1
  67. pydantic_ai/toolsets/function.py +58 -21
  68. pydantic_ai/toolsets/wrapper.py +2 -1
  69. pydantic_ai/usage.py +44 -8
  70. {pydantic_ai_slim-0.8.0.dist-info → pydantic_ai_slim-1.0.0.dist-info}/METADATA +8 -9
  71. pydantic_ai_slim-1.0.0.dist-info/RECORD +121 -0
  72. pydantic_ai_slim-0.8.0.dist-info/RECORD +0 -119
  73. {pydantic_ai_slim-0.8.0.dist-info → pydantic_ai_slim-1.0.0.dist-info}/WHEEL +0 -0
  74. {pydantic_ai_slim-0.8.0.dist-info → pydantic_ai_slim-1.0.0.dist-info}/entry_points.txt +0 -0
  75. {pydantic_ai_slim-0.8.0.dist-info → pydantic_ai_slim-1.0.0.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/toolsets/function.py CHANGED
@@ -1,8 +1,8 @@
 from __future__ import annotations

-from collections.abc import Awaitable, Sequence
+from collections.abc import Awaitable, Callable, Sequence
 from dataclasses import dataclass, replace
-from typing import Any, Callable, overload
+from typing import Any, overload

 from pydantic.json_schema import GenerateJsonSchema

@@ -19,7 +19,7 @@ from ..tools import (
 from .abstract import AbstractToolset, ToolsetTool


-@dataclass
+@dataclass(kw_only=True)
 class FunctionToolsetTool(ToolsetTool[AgentDepsT]):
     """A tool definition for a function toolset tool that keeps track of the function to call."""

@@ -33,26 +33,43 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
     See [toolset docs](../toolsets.md#function-toolset) for more information.
     """

-    max_retries: int
     tools: dict[str, Tool[Any]]
+    max_retries: int
     _id: str | None
+    docstring_format: DocstringFormat
+    require_parameter_descriptions: bool
+    schema_generator: type[GenerateJsonSchema]

     def __init__(
         self,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = [],
-        max_retries: int = 1,
         *,
+        max_retries: int = 1,
         id: str | None = None,
+        docstring_format: DocstringFormat = 'auto',
+        require_parameter_descriptions: bool = False,
+        schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
     ):
         """Build a new function toolset.

         Args:
             tools: The tools to add to the toolset.
             max_retries: The maximum number of retries for each tool during a run.
-            id: An optional unique ID for the toolset. A toolset needs to have an ID in order to be used in a durable execution environment like Temporal, in which case the ID will be used to identify the toolset's activities within the workflow.
+            id: An optional unique ID for the toolset. A toolset needs to have an ID in order to be used in a durable execution environment like Temporal,
+                in which case the ID will be used to identify the toolset's activities within the workflow.
+            docstring_format: Format of tool docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
+                Defaults to `'auto'`, such that the format is inferred from the structure of the docstring.
+                Applies to all tools, unless overridden when adding a tool.
+            require_parameter_descriptions: If True, raise an error if a parameter description is missing. Defaults to False.
+                Applies to all tools, unless overridden when adding a tool.
+            schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`.
+                Applies to all tools, unless overridden when adding a tool.
         """
         self.max_retries = max_retries
         self._id = id
+        self.docstring_format = docstring_format
+        self.require_parameter_descriptions = require_parameter_descriptions
+        self.schema_generator = schema_generator

         self.tools = {}
         for tool in tools:
@@ -76,10 +93,11 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
         name: str | None = None,
         retries: int | None = None,
         prepare: ToolPrepareFunc[AgentDepsT] | None = None,
-        docstring_format: DocstringFormat = 'auto',
-        require_parameter_descriptions: bool = False,
-        schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
+        docstring_format: DocstringFormat | None = None,
+        require_parameter_descriptions: bool | None = None,
+        schema_generator: type[GenerateJsonSchema] | None = None,
         strict: bool | None = None,
+        requires_approval: bool = False,
     ) -> Callable[[ToolFuncEither[AgentDepsT, ToolParams]], ToolFuncEither[AgentDepsT, ToolParams]]: ...

     def tool(
@@ -90,10 +108,11 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
         name: str | None = None,
         retries: int | None = None,
         prepare: ToolPrepareFunc[AgentDepsT] | None = None,
-        docstring_format: DocstringFormat = 'auto',
-        require_parameter_descriptions: bool = False,
-        schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
+        docstring_format: DocstringFormat | None = None,
+        require_parameter_descriptions: bool | None = None,
+        schema_generator: type[GenerateJsonSchema] | None = None,
         strict: bool | None = None,
+        requires_approval: bool = False,
     ) -> Any:
         """Decorator to register a tool function which takes [`RunContext`][pydantic_ai.tools.RunContext] as its first argument.

@@ -135,11 +154,15 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
                 tool from a given step. This is useful if you want to customise a tool at call time,
                 or omit it completely from a step. See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
             docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
-                Defaults to `'auto'`, such that the format is inferred from the structure of the docstring.
-            require_parameter_descriptions: If True, raise an error if a parameter description is missing. Defaults to False.
-            schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`.
+                If `None`, the default value is determined by the toolset.
+            require_parameter_descriptions: If True, raise an error if a parameter description is missing.
+                If `None`, the default value is determined by the toolset.
+            schema_generator: The JSON schema generator class to use for this tool.
+                If `None`, the default value is determined by the toolset.
             strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                 See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
+            requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
+                See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
         """

         def tool_decorator(
@@ -156,6 +179,7 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
                 require_parameter_descriptions,
                 schema_generator,
                 strict,
+                requires_approval,
             )
             return func_

@@ -168,10 +192,11 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
         name: str | None = None,
         retries: int | None = None,
         prepare: ToolPrepareFunc[AgentDepsT] | None = None,
-        docstring_format: DocstringFormat = 'auto',
-        require_parameter_descriptions: bool = False,
-        schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
+        docstring_format: DocstringFormat | None = None,
+        require_parameter_descriptions: bool | None = None,
+        schema_generator: type[GenerateJsonSchema] | None = None,
         strict: bool | None = None,
+        requires_approval: bool = False,
     ) -> None:
         """Add a function as a tool to the toolset.

@@ -190,12 +215,23 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
                 tool from a given step. This is useful if you want to customise a tool at call time,
                 or omit it completely from a step. See [`ToolPrepareFunc`][pydantic_ai.tools.ToolPrepareFunc].
             docstring_format: The format of the docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
-                Defaults to `'auto'`, such that the format is inferred from the structure of the docstring.
-            require_parameter_descriptions: If True, raise an error if a parameter description is missing. Defaults to False.
-            schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`.
+                If `None`, the default value is determined by the toolset.
+            require_parameter_descriptions: If True, raise an error if a parameter description is missing.
+                If `None`, the default value is determined by the toolset.
+            schema_generator: The JSON schema generator class to use for this tool.
+                If `None`, the default value is determined by the toolset.
             strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                 See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
+            requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
+                See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
         """
+        if docstring_format is None:
+            docstring_format = self.docstring_format
+        if require_parameter_descriptions is None:
+            require_parameter_descriptions = self.require_parameter_descriptions
+        if schema_generator is None:
+            schema_generator = self.schema_generator
+
         tool = Tool[AgentDepsT](
             func,
             takes_ctx=takes_ctx,
@@ -206,6 +242,7 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
             require_parameter_descriptions=require_parameter_descriptions,
             schema_generator=schema_generator,
             strict=strict,
+            requires_approval=requires_approval,
         )
         self.add_tool(tool)

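Taken together, these changes let docstring handling, parameter-description enforcement, and the JSON schema generator be set once on the toolset and inherited by every tool, while individual tools can be flagged as requiring human-in-the-loop approval. A minimal sketch of how this might be used, assuming the public `FunctionToolset` import path and `Agent(..., toolsets=[...])` wiring; the model string and tool bodies are placeholders:

```python
from pydantic_ai import Agent
from pydantic_ai.toolsets import FunctionToolset

# Toolset-level defaults now apply to every registered tool unless overridden per tool.
toolset = FunctionToolset(
    max_retries=2,
    docstring_format='google',
    require_parameter_descriptions=True,
)


@toolset.tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: First number.
        b: Second number.
    """
    return a + b


# `requires_approval=True` marks the tool as needing human-in-the-loop approval.
@toolset.tool(requires_approval=True)
def delete_file(path: str) -> None:
    """Delete a file.

    Args:
        path: Path of the file to delete.
    """


agent = Agent('openai:gpt-4o', toolsets=[toolset])
```

Per-tool settings still win: passing `docstring_format`, `require_parameter_descriptions`, or `schema_generator` to `tool()` or `add_function()` overrides the toolset default, since `None` now means "defer to the toolset".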
@@ -1,7 +1,8 @@
 from __future__ import annotations

+from collections.abc import Callable
 from dataclasses import dataclass, replace
-from typing import Any, Callable
+from typing import Any

 from typing_extensions import Self

pydantic_ai/usage.py CHANGED
@@ -3,7 +3,9 @@ from __future__ import annotations as _annotations
 import dataclasses
 from copy import copy
 from dataclasses import dataclass, fields
+from typing import Annotated

+from pydantic import AliasChoices, BeforeValidator, Field
 from typing_extensions import deprecated, overload

 from . import _utils
@@ -12,9 +14,13 @@ from .exceptions import UsageLimitExceeded
 __all__ = 'RequestUsage', 'RunUsage', 'Usage', 'UsageLimits'


-@dataclass(repr=False)
+@dataclass(repr=False, kw_only=True)
 class UsageBase:
-    input_tokens: int = 0
+    input_tokens: Annotated[
+        int,
+        # `request_tokens` is deprecated, but we still want to support deserializing model responses stored in a DB before the name was changed
+        Field(validation_alias=AliasChoices('input_tokens', 'request_tokens')),
+    ] = 0
     """Number of input/prompt tokens."""

     cache_write_tokens: int = 0
@@ -22,7 +28,11 @@ class UsageBase:
     cache_read_tokens: int = 0
     """Number of tokens read from the cache."""

-    output_tokens: int = 0
+    output_tokens: Annotated[
+        int,
+        # `response_tokens` is deprecated, but we still want to support deserializing model responses stored in a DB before the name was changed
+        Field(validation_alias=AliasChoices('output_tokens', 'response_tokens')),
+    ] = 0
     """Number of output/completion tokens."""

     input_audio_tokens: int = 0
@@ -32,7 +42,11 @@ class UsageBase:
     output_audio_tokens: int = 0
     """Number of audio output tokens."""

-    details: dict[str, int] = dataclasses.field(default_factory=dict)
+    details: Annotated[
+        dict[str, int],
+        # `details` can not be `None` any longer, but we still want to support deserializing model responses stored in a DB before this was changed
+        BeforeValidator(lambda d: d or {}),
+    ] = dataclasses.field(default_factory=dict)
     """Any extra details returned by the model."""

     @property
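The `Annotated` validators above exist so that usage data serialized before the field renames (`request_tokens` → `input_tokens`, `response_tokens` → `output_tokens`) and before `details` became non-optional can still be loaded. A small sketch of the intent, assuming the dataclass is validated through pydantic (for example via `TypeAdapter`), as happens when stored model responses are deserialized:

```python
from pydantic import TypeAdapter

from pydantic_ai.usage import RequestUsage

adapter = TypeAdapter(RequestUsage)

# A payload stored by an older version: old field names and `details: null`.
legacy = {'request_tokens': 100, 'response_tokens': 20, 'details': None}

usage = adapter.validate_python(legacy)
assert usage.input_tokens == 100  # populated via the `request_tokens` alias
assert usage.output_tokens == 20  # populated via the `response_tokens` alias
assert usage.details == {}  # `None` coerced to an empty dict by the BeforeValidator
```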
@@ -75,7 +89,7 @@ class UsageBase:
         return any(dataclasses.asdict(self).values())


-@dataclass(repr=False)
+@dataclass(repr=False, kw_only=True)
 class RequestUsage(UsageBase):
     """LLM usage associated with a single request.

@@ -107,7 +121,7 @@ class RequestUsage(UsageBase):
         return new_usage


-@dataclass(repr=False)
+@dataclass(repr=False, kw_only=True)
 class RunUsage(UsageBase):
     """LLM usage associated with an agent run.

@@ -117,16 +131,21 @@ class RunUsage(UsageBase):
     requests: int = 0
     """Number of requests made to the LLM API."""

+    tool_calls: int = 0
+    """Number of successful tool calls executed during the run."""
+
     input_tokens: int = 0
     """Total number of text input/prompt tokens."""

     cache_write_tokens: int = 0
     """Total number of tokens written to the cache."""
+
     cache_read_tokens: int = 0
     """Total number of tokens read from the cache."""

     input_audio_tokens: int = 0
     """Total number of audio input tokens."""
+
     cache_audio_read_tokens: int = 0
     """Total number of audio tokens read from the cache."""

@@ -144,6 +163,7 @@ class RunUsage(UsageBase):
         """
         if isinstance(incr_usage, RunUsage):
             self.requests += incr_usage.requests
+            self.tool_calls += incr_usage.tool_calls
         return _incr_usage_tokens(self, incr_usage)

     def __add__(self, other: RunUsage | RequestUsage) -> RunUsage:
@@ -174,13 +194,13 @@ def _incr_usage_tokens(slf: RunUsage | RequestUsage, incr_usage: RunUsage | Requ
         slf.details[key] = slf.details.get(key, 0) + value


-@dataclass
+@dataclass(repr=False, kw_only=True)
 @deprecated('`Usage` is deprecated, use `RunUsage` instead')
 class Usage(RunUsage):
     """Deprecated alias for `RunUsage`."""


-@dataclass(repr=False)
+@dataclass(repr=False, kw_only=True)
 class UsageLimits:
     """Limits on model usage.

@@ -192,6 +212,8 @@ class UsageLimits:

     request_limit: int | None = 50
     """The maximum number of requests allowed to the model."""
+    tool_calls_limit: int | None = None
+    """The maximum number of successful tool calls allowed to be executed."""
     input_tokens_limit: int | None = None
     """The maximum number of input/prompt tokens allowed."""
     output_tokens_limit: int | None = None
@@ -218,12 +240,14 @@ class UsageLimits:
         self,
         *,
         request_limit: int | None = 50,
+        tool_calls_limit: int | None = None,
         input_tokens_limit: int | None = None,
         output_tokens_limit: int | None = None,
         total_tokens_limit: int | None = None,
         count_tokens_before_request: bool = False,
     ) -> None:
         self.request_limit = request_limit
+        self.tool_calls_limit = tool_calls_limit
         self.input_tokens_limit = input_tokens_limit
         self.output_tokens_limit = output_tokens_limit
         self.total_tokens_limit = total_tokens_limit
@@ -237,12 +261,14 @@ class UsageLimits:
         self,
         *,
         request_limit: int | None = 50,
+        tool_calls_limit: int | None = None,
         request_tokens_limit: int | None = None,
         response_tokens_limit: int | None = None,
         total_tokens_limit: int | None = None,
         count_tokens_before_request: bool = False,
     ) -> None:
         self.request_limit = request_limit
+        self.tool_calls_limit = tool_calls_limit
         self.input_tokens_limit = request_tokens_limit
         self.output_tokens_limit = response_tokens_limit
         self.total_tokens_limit = total_tokens_limit
@@ -252,6 +278,7 @@ class UsageLimits:
         self,
         *,
         request_limit: int | None = 50,
+        tool_calls_limit: int | None = None,
         input_tokens_limit: int | None = None,
         output_tokens_limit: int | None = None,
         total_tokens_limit: int | None = None,
@@ -261,6 +288,7 @@ class UsageLimits:
         response_tokens_limit: int | None = None,
     ):
         self.request_limit = request_limit
+        self.tool_calls_limit = tool_calls_limit
         self.input_tokens_limit = input_tokens_limit or request_tokens_limit
         self.output_tokens_limit = output_tokens_limit or response_tokens_limit
         self.total_tokens_limit = total_tokens_limit
@@ -312,4 +340,12 @@ class UsageLimits:
         if self.total_tokens_limit is not None and total_tokens > self.total_tokens_limit:
             raise UsageLimitExceeded(f'Exceeded the total_tokens_limit of {self.total_tokens_limit} ({total_tokens=})')

+    def check_before_tool_call(self, usage: RunUsage) -> None:
+        """Raises a `UsageLimitExceeded` exception if the next tool call would exceed the tool call limit."""
+        tool_calls_limit = self.tool_calls_limit
+        if tool_calls_limit is not None and usage.tool_calls >= tool_calls_limit:
+            raise UsageLimitExceeded(
+                f'The next tool call would exceed the tool_calls_limit of {tool_calls_limit} (tool_calls={usage.tool_calls})'
+            )
+
     __repr__ = _utils.dataclasses_no_defaults_repr
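Combined with the new `RunUsage.tool_calls` counter, `tool_calls_limit` caps how many tool calls a run may execute, and `check_before_tool_call` is the method that raises once the next call would exceed that cap. A small illustration of the check in isolation (the `kw_only=True` dataclasses also mean both objects must now be constructed with keyword arguments); the import paths follow the module shown above:

```python
from pydantic_ai.exceptions import UsageLimitExceeded
from pydantic_ai.usage import RunUsage, UsageLimits

limits = UsageLimits(request_limit=50, tool_calls_limit=3)

# Two successful tool calls so far: the next call is still allowed.
limits.check_before_tool_call(RunUsage(requests=1, tool_calls=2))

# Three tool calls already executed: the next one would exceed the limit.
try:
    limits.check_before_tool_call(RunUsage(requests=1, tool_calls=3))
except UsageLimitExceeded as exc:
    print(exc)
    #> The next tool call would exceed the tool_calls_limit of 3 (tool_calls=3)
```

In a real run this check is invoked by the framework before each tool call, so user code typically just sets `tool_calls_limit` and handles `UsageLimitExceeded`.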
{pydantic_ai_slim-0.8.0.dist-info → pydantic_ai_slim-1.0.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.8.0
+Version: 1.0.0
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -9,7 +9,7 @@ Project-URL: Changelog, https://github.com/pydantic/pydantic-ai/releases
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>, Douwe Maan <douwe@pydantic.dev>
 License-Expression: MIT
 License-File: LICENSE
-Classifier: Development Status :: 4 - Beta
+Classifier: Development Status :: 5 - Production/Stable
 Classifier: Environment :: Console
 Classifier: Environment :: MacOS X
 Classifier: Intended Audience :: Developers
@@ -21,21 +21,20 @@ Classifier: Operating System :: Unix
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Requires-Python: >=3.9
+Requires-Python: >=3.10
 Requires-Dist: eval-type-backport>=0.2.0
 Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: genai-prices>=0.0.22
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.8.0
+Requires-Dist: pydantic-graph==1.0.0
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +56,7 @@ Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'c
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.8.0; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.0; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
@@ -65,9 +64,9 @@ Requires-Dist: groq>=0.25.0; extra == 'groq'
 Provides-Extra: huggingface
 Requires-Dist: huggingface-hub[inference]>=0.33.5; extra == 'huggingface'
 Provides-Extra: logfire
-Requires-Dist: logfire>=3.14.1; extra == 'logfire'
+Requires-Dist: logfire[httpx]>=3.14.1; extra == 'logfire'
 Provides-Extra: mcp
-Requires-Dist: mcp>=1.12.3; (python_version >= '3.10') and extra == 'mcp'
+Requires-Dist: mcp>=1.12.3; extra == 'mcp'
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.9.2; extra == 'mistral'
 Provides-Extra: openai
@@ -77,7 +76,7 @@ Requires-Dist: tenacity>=8.2.3; extra == 'retries'
 Provides-Extra: tavily
 Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
 Provides-Extra: temporal
-Requires-Dist: temporalio==1.15.0; extra == 'temporal'
+Requires-Dist: temporalio==1.17.0; extra == 'temporal'
 Provides-Extra: vertexai
 Requires-Dist: google-auth>=2.36.0; extra == 'vertexai'
 Requires-Dist: requests>=2.32.2; extra == 'vertexai'
pydantic_ai_slim-1.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,121 @@
+pydantic_ai/__init__.py,sha256=CfqGPSjKlDl5iw1L48HbELsDuzxIzBFnFnovI_GcFWA,2083
+pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
+pydantic_ai/_a2a.py,sha256=2Hopcyl6o6U91eVkd7iAbEPYA5f0hJb8A5_fwMC0UfM,12168
+pydantic_ai/_agent_graph.py,sha256=lgifW_LwNATj0usC4wfU32Z4PEsPNHcEbbHtzIj6Y_0,46620
+pydantic_ai/_cli.py,sha256=C-Uvbdx9wWnNqZKHN_r8d4mGte_aIPikOkKrTPvdrN8,14057
+pydantic_ai/_function_schema.py,sha256=olbmUMQoQV5qKV4j0-cOnhcTINz4uYyeDqMyusrFRtY,11234
+pydantic_ai/_griffe.py,sha256=BphvTL00FHxsSY56GM-bNyCOdwrpL0T3LbDQITWUK_Q,5280
+pydantic_ai/_mcp.py,sha256=PuvwnlLjv7YYOa9AZJCrklevBug99zGMhwJCBGG7BHQ,5626
+pydantic_ai/_otel_messages.py,sha256=qLu81aBDEAsUTW6efBzWRXNDMICTrUUBpcGbCEyXr4o,1480
+pydantic_ai/_output.py,sha256=0Oq-FFvxXdR0Ia_8LrJ1CanGOWkI5C98HfdkY8TZhik,37442
+pydantic_ai/_parts_manager.py,sha256=SZi2_G9Z5Z9BLuInfgcki9p5yUhVjR38WcxfuOoECLA,18057
+pydantic_ai/_run_context.py,sha256=AFSTtOBbUAnPpM-V5_b5fLMVAFbEBX4oOdYsGR9ayt4,1824
+pydantic_ai/_system_prompt.py,sha256=WdDW_DTGHujcFFaK-J7J6mA4ZDJZ0IOKpyizJA-1Y5Q,1142
+pydantic_ai/_thinking_part.py,sha256=x80-Vkon16GOyq3W6f2qzafTVPC5dCgF7QD3k8ZMmYU,1304
+pydantic_ai/_tool_manager.py,sha256=YUTA9uJneug_vkxCGrTc34Qxyzz2dAuDNt5n4x2k0zA,9513
+pydantic_ai/_utils.py,sha256=xa2PoAcTN-oXhfXOONOighmue-jtSv668o9Fu_IdO0A,16062
+pydantic_ai/ag_ui.py,sha256=L21cc_LN532psY70GNgJNaj-PtBpLgqARJBF1zb2ZX4,28127
+pydantic_ai/builtin_tools.py,sha256=t0wa6KsgDCRoZMKJKRzRDyxaz1X4mDWMHlGjQmqFLdg,3222
+pydantic_ai/direct.py,sha256=zMsz6poVgEq7t7L_8FWM6hmKdqTzjyQYL5xzQt_59Us,14951
+pydantic_ai/exceptions.py,sha256=zsXZMKf2BJuVsfuHl1fWTkogLU37bd4yq7D6BKHAzVs,4968
+pydantic_ai/format_prompt.py,sha256=37imBG2Fgpn-_RfAFalOX8Xc_XpGH2gY9tnhJDvxfk8,4243
+pydantic_ai/mcp.py,sha256=1sKEgd8Eue5vWaUyU-s3TWlOaH6rfXetsuz_sHYorXE,30384
+pydantic_ai/messages.py,sha256=5SeqvRf0dMe3BTEtvTHTmcm3MMz7PIq5zIS6RRAhtIg,54544
+pydantic_ai/output.py,sha256=wzNgVKJgxyXtSH-uNbRxIaUNLidxlQcwWYT2o1gY2hE,12037
+pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydantic_ai/result.py,sha256=FrJbd0nwaRVIxGH_EhV-ITQvrrd-JaDya9EDsE5-Pps,25389
+pydantic_ai/retries.py,sha256=QM4oDA9DG-Y2qP06fbCp8Dqq8ups40Rr4HYjAOlbNyM,14650
+pydantic_ai/run.py,sha256=qpTu2Q2O3lvcQAZREuIpyL0vQN13AvW99SwD7Oe9hKc,15175
+pydantic_ai/settings.py,sha256=yuUZ7-GkdPB-Gbx71kSdh8dSr6gwM9gEwk84qNxPO_I,3552
+pydantic_ai/tools.py,sha256=SFiLSHc4TwlhZ73nPpjd4etVovsSyPcYEQ9520Gz8xI,19169
+pydantic_ai/usage.py,sha256=UoSOwhH-NTAeXl7Tq8GWXcW82m8zQLQvThvQehEx08g,14070
+pydantic_ai/agent/__init__.py,sha256=FLlpv4-j0-94xEMA5GV4HGz7K3Y1zrMb_cVvlp-XDNs,62321
+pydantic_ai/agent/abstract.py,sha256=nKDP_T0hRhi1RGVNIcAQbmQkTrwGWbTxt_Lzwfa7cPs,44291
+pydantic_ai/agent/wrapper.py,sha256=--IJo8Yb-2uzcCBSIB9oB_9FQ1R7yZYkWnLSq0iUExs,9464
+pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydantic_ai/common_tools/duckduckgo.py,sha256=cJd-BUg-i50E0QjKveRCndGlU5GdvLq9UgNNJ18VQIQ,2263
+pydantic_ai/common_tools/tavily.py,sha256=Q1xxSF5HtXAaZ10Pp-OaDOHXwJf2mco9wScGEQXD7E4,2495
+pydantic_ai/durable_exec/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydantic_ai/durable_exec/temporal/__init__.py,sha256=XKwy68wfgmjr057nolRwGHTKiadxufpQEGEUprAV09k,5563
+pydantic_ai/durable_exec/temporal/_agent.py,sha256=9-yANaXqbOELkvTu_zeJI5tROKUY2t9-WYUIksSmzD8,36901
+pydantic_ai/durable_exec/temporal/_function_toolset.py,sha256=Hnfz3ukOqgq8j3h9u97S-fMfq4un1HZA4kxN2irWD_0,5562
+pydantic_ai/durable_exec/temporal/_logfire.py,sha256=5bSiOt-jihQATJsg-jrGmEqP3RWW_Sz6c2aicjt03lI,2009
+pydantic_ai/durable_exec/temporal/_mcp_server.py,sha256=VFvHPVhvYz-ITGaXXNyuWwB8tsdF3Hg9rs7gss8TKWY,6032
+pydantic_ai/durable_exec/temporal/_model.py,sha256=cFHrk-yM65d41TPiWp5hwZEqXBNO6lNMtcU587j1b58,6765
+pydantic_ai/durable_exec/temporal/_run_context.py,sha256=IMLEW4AqHklumLiRBUTW-ogJGiH_tX3nCrFrxD7CbFw,2390
+pydantic_ai/durable_exec/temporal/_toolset.py,sha256=HxmQ5vut7Zd5eyrC27eNNn5_CHA_4-yJL_Pk8cKZSOs,2892
+pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydantic_ai/ext/aci.py,sha256=sUllKDNO-LOMurbFgxwRHuzNlBkSa3aVBqXfEm-A_vo,2545
+pydantic_ai/ext/langchain.py,sha256=iLVEZv1kcLkdIHo3us2yfdi0kVqyJ6qTaCt9BoLWm4k,2335
+pydantic_ai/models/__init__.py,sha256=BmZ4kyiGIjsapj7cmKc6uF4EEOnuN6iiDumuOeGfxOk,35692
+pydantic_ai/models/anthropic.py,sha256=IsHBwJ55IYor27sewq039rc923fqi3AUWXAS_vbA978,30286
+pydantic_ai/models/bedrock.py,sha256=Eeq-rqZWaVu2tbH8eSNoxp5dq5RH8esnsOp-ge_XGu4,30786
+pydantic_ai/models/cohere.py,sha256=aKgpcYfIKwMfEroWxfLyzYu8ELuddF4TXv7s0LU3Pcc,13052
+pydantic_ai/models/fallback.py,sha256=XJ74wRxVT4dF0uewHH3is9I-zcLBK8KFIhpK3BB6mRw,5526
+pydantic_ai/models/function.py,sha256=uWGdw4sFhhmczjU44rwe6i2XFafOXAalIigWGCSivYg,14231
+pydantic_ai/models/gemini.py,sha256=DYEaOnwGmo9FUGVkRRrydGuQwYhnO-Cq5grTurLWgb4,39376
+pydantic_ai/models/google.py,sha256=cqU6eBMmSZvkeeaEPlJVhkRxN7L5vi4EfyWabZkgP5g,32102
+pydantic_ai/models/groq.py,sha256=am-Qpp6RLFqwRnouIdACWd5nxOBB92Bn0hRs-VdrD38,25561
+pydantic_ai/models/huggingface.py,sha256=sWjHTVfqOtdlOENdERkPxtGjQ8quUNepPjqlXSR7aGk,20417
+pydantic_ai/models/instrumented.py,sha256=t9ESv5XMqJ4OWkTp3JoIcaCzlnW7pktelQVmX5fpySM,20763
+pydantic_ai/models/mcp_sampling.py,sha256=qnLCO3CB5bNQ86SpWRA-CSSOVcCCLPwjHtcNFvW9wHs,3461
+pydantic_ai/models/mistral.py,sha256=yS5pBYtFUkICwkvGN23iBbBfaBASN1DARsB6QQbBjOc,32344
+pydantic_ai/models/openai.py,sha256=vyePy3s_pI0gnkizas1Vfbiy8PI2_5skNcsnG03Fzas,64818
+pydantic_ai/models/test.py,sha256=1kBwi7pSUt9_K1U-hokOilplxJWPQ3KRKH_s8bYmt_s,19969
+pydantic_ai/models/wrapper.py,sha256=9MeHW7mXPsEK03IKL0rtjeX6QgXyZROOOzLh72GiX2k,2148
+pydantic_ai/profiles/__init__.py,sha256=V6uGAVJuIaYRuZOQjkdIyFfDKD5py18RC98njnHOFug,3293
+pydantic_ai/profiles/_json_schema.py,sha256=CthOGmPSjgEZRRglfvg31zyQ9vjHDdacXoFpmba93dE,7206
+pydantic_ai/profiles/amazon.py,sha256=IPa2wydpcbFLLvhDK35-pwwoKo0Pg4vP84823fHx0zc,314
+pydantic_ai/profiles/anthropic.py,sha256=J9N46G8eOjHdQ5CwZSLiwGdPb0eeIMdsMjwosDpvNhI,275
+pydantic_ai/profiles/cohere.py,sha256=lcL34Ht1jZopwuqoU6OV9l8vN4zwF-jiPjlsEABbSRo,215
+pydantic_ai/profiles/deepseek.py,sha256=JDwfkr-0YovlB3jEKk7dNFvepxNf_YuLgLkGCtyXHSk,282
+pydantic_ai/profiles/google.py,sha256=cd5zwtx0MU1Xwm8c-oqi2_OJ2-PMJ8Vy23mxvSJF7ik,4856
+pydantic_ai/profiles/grok.py,sha256=nBOxOCYCK9aiLmz2Q-esqYhotNbbBC1boAoOYIk1tVw,211
+pydantic_ai/profiles/groq.py,sha256=jD_vG6M5q_uwLmJgkPavWWhGCqo3HvT_4UYfwzC1BMU,682
+pydantic_ai/profiles/harmony.py,sha256=_81tOGOYGTH3Za67jjtdINvASTTM5_CTyc1Ej2KHJQw,500
+pydantic_ai/profiles/meta.py,sha256=JdZcpdRWx8PY1pU9Z2i_TYtA0Cpbg23xyFrV7eXnooY,309
+pydantic_ai/profiles/mistral.py,sha256=ll01PmcK3szwlTfbaJLQmfd0TADN8lqjov9HpPJzCMQ,217
+pydantic_ai/profiles/moonshotai.py,sha256=e1RJnbEvazE6aJAqfmYLYGNtwNwg52XQDRDkcLrv3fU,272
+pydantic_ai/profiles/openai.py,sha256=4w-xzTfn6PKQwmT-Cc13Wit9UcYGapo6eD1q8LgUHRU,9038
+pydantic_ai/profiles/qwen.py,sha256=9SnTpMKndxNQMFyumyaOczJa5JGWbYQdpVKKW4OzKjk,749
+pydantic_ai/providers/__init__.py,sha256=QlFpPM_kGnF_YAzwB9Dgmxx4Emp33x0bh831H_xKDXE,4478
+pydantic_ai/providers/anthropic.py,sha256=bDFNAE4WB66Dn7YDnI3dv6yBbMmM9Kzt2kalM4Fq8WQ,3158
+pydantic_ai/providers/azure.py,sha256=msYyeQoHATxCJkiF1N05lPSJivh-SWKK1463WF6xTK4,5823
+pydantic_ai/providers/bedrock.py,sha256=nN5CQ0XOQHp8FSlS7KCjn-p1hYcx0zVeLcMu_tAbvz8,5825
+pydantic_ai/providers/cerebras.py,sha256=2zgsNxup_7OEPOXnbJHMYnVRDnB9UYTQnOO4wv7xnYA,3436
+pydantic_ai/providers/cohere.py,sha256=-F0prLuI2aDtHNZakd1GlyvgFLio-fo5n6fRbyPMvro,2858
+pydantic_ai/providers/deepseek.py,sha256=JSc7dQbB-l7_Phf61ZLb4_c1oym9fHea_h2Yq88uoL8,3032
+pydantic_ai/providers/fireworks.py,sha256=-jMRxbt353nENdpxuDpC4zJZ9wlJBcWa4wdcUk4cXKo,3594
+pydantic_ai/providers/github.py,sha256=Mp6-piXuRe5R0Iu4p0N06aIZgX7rJe5KRzCjt9E4OK4,4378
+pydantic_ai/providers/google.py,sha256=iLXcKUl5r7wdLuZtT1IM3obGZi7ecLM_PDyWdQKDncI,6038
+pydantic_ai/providers/google_gla.py,sha256=dLkDxps5gEtxsQiDbs1e88lXLYeX4i2qnJtDiFFJ0Ng,1965
+pydantic_ai/providers/google_vertex.py,sha256=tAR3L1DZPDvGOJsKyGkIRPeXL7wjly4CvqTWMK1ozVQ,9752
+pydantic_ai/providers/grok.py,sha256=s9Y_iYkYCBc7UbP2ppGOUdAP_04xrkmPBHq3q3Qr9eE,3109
+pydantic_ai/providers/groq.py,sha256=3XuYqvugToJhTf7kQCdtdaTpFsiqAu_pwnIQnHm04uo,4913
+pydantic_ai/providers/heroku.py,sha256=wA36vh0ldpdaj33FPtfo4roY_MhaCqErjLyGtcbC6Xs,2958
+pydantic_ai/providers/huggingface.py,sha256=MLAv-Z99Kii5Faolq97_0Ir1LUKH9CwRmJFaI5RvwW4,4914
+pydantic_ai/providers/litellm.py,sha256=3hTCjHWRG_1c4S9JSNm0BDBDi4q6BVVZ3OLSXhTndNM,5079
+pydantic_ai/providers/mistral.py,sha256=ZxfOQNB2RADtHeGLQrhxHwq6cXpBi3LMgIUa_9wXoug,3088
+pydantic_ai/providers/moonshotai.py,sha256=LwasmxCZCPkq1pb1uDtZTEb_nE55bAtX3QXgLmuNlHE,3260
+pydantic_ai/providers/ollama.py,sha256=_bxons0p8g0RSPNV8iq3AScVS1ym27QTW4zhDqSakgY,4633
+pydantic_ai/providers/openai.py,sha256=xCpR2c7QnYQukiJJKiFTSaGSewPFht7ekTasJDjSimA,3071
+pydantic_ai/providers/openrouter.py,sha256=PXGgHPtlQQHKFaSnmiswWZ3dTvmT9PAg-NvfRYGjrPw,4154
+pydantic_ai/providers/together.py,sha256=Dln_NgCul1XVOQtNaYvqWrNjOWj9XzA8n4NwNMKkbLk,3450
+pydantic_ai/providers/vercel.py,sha256=Q7pPvzaoh7Uiqq7CD8TxaWnXnXRKYgWJRwQXSYm0ZKQ,4257
+pydantic_ai/toolsets/__init__.py,sha256=lYwnxjSqxY6rIYYDTDctyWPckDwnRX_9orvqY2Ap2B8,806
+pydantic_ai/toolsets/_dynamic.py,sha256=ETAtKW2A_aFjSIPO3pRIZNKH54qNfHQB7WtmEjWqHzc,2939
+pydantic_ai/toolsets/abstract.py,sha256=CXsDF37JkBWcy9hwrgdBe4gqgocNcPKOFEIvQ7t9Ysk,7751
+pydantic_ai/toolsets/approval_required.py,sha256=zyYGEx2VqprLed16OXg1QWr81rnAB0CmAzTeyQJ9A4o,1100
+pydantic_ai/toolsets/combined.py,sha256=LQzm_g6gskiHRUMFDvm88SSrz8OGxbdxyHiKzQrMBNU,4026
+pydantic_ai/toolsets/external.py,sha256=J9mWQm1HLbRCOJwpLBIvUZZGR_ywSB7pz8MrXkRNBoU,1736
+pydantic_ai/toolsets/filtered.py,sha256=PSQG9EbBYJpHUEBb_4TGzhjAcQPo5aPKvTuReeoWYtQ,864
+pydantic_ai/toolsets/function.py,sha256=D_NllIGtC4P4Q8QAxH82743ykbUaSTtQp-HpOSFaRgk,12907
+pydantic_ai/toolsets/prefixed.py,sha256=0KwcDkW8OM36ZUsOLVP5h-Nj2tPq78L3_E2c-1Fbh5s,1426
+pydantic_ai/toolsets/prepared.py,sha256=Zjfz6S8In6PBVxoKFN9sKPN984zO6t0awB7Lnq5KODw,1431
+pydantic_ai/toolsets/renamed.py,sha256=JuLHpi-hYPiSPlaTpN8WiXLiGsywYK0axi2lW2Qs75k,1637
+pydantic_ai/toolsets/wrapper.py,sha256=KRzF1p8dncHbva8CE6Ud-IC5E_aygIHlwH5atXK55k4,1673
+pydantic_ai_slim-1.0.0.dist-info/METADATA,sha256=SK5j_7jRQS64VQxZNbAQkx9JpqOQePo8DeKtbg4KGH4,4601
+pydantic_ai_slim-1.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-1.0.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-1.0.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-1.0.0.dist-info/RECORD,,