pydantic-ai-slim 1.0.5__py3-none-any.whl → 1.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of pydantic-ai-slim was flagged as potentially problematic.

pydantic_ai/models/cohere.py CHANGED
@@ -207,7 +207,7 @@ class CohereModel(Model):
                 if content.type == 'text':
                     parts.append(TextPart(content=content.text))
                 elif content.type == 'thinking':  # pragma: no branch
-                    parts.append(ThinkingPart(content=cast(str, content.thinking)))  # pyright: ignore[reportUnknownMemberType,reportAttributeAccessIssue] - https://github.com/cohere-ai/cohere-python/issues/692
+                    parts.append(ThinkingPart(content=content.thinking))
         for c in response.message.tool_calls or []:
             if c.function and c.function.name and c.function.arguments:  # pragma: no branch
                 parts.append(
@@ -258,7 +258,7 @@ class CohereModel(Model):
             if texts or thinking:
                 contents: list[AssistantMessageV2ContentItem] = []
                 if thinking:
-                    contents.append(ThinkingAssistantMessageV2ContentItem(thinking='\n\n'.join(thinking)))  # pyright: ignore[reportCallIssue] - https://github.com/cohere-ai/cohere-python/issues/692
+                    contents.append(ThinkingAssistantMessageV2ContentItem(thinking='\n\n'.join(thinking)))
                 if texts:  # pragma: no branch
                     contents.append(TextAssistantMessageV2ContentItem(text='\n\n'.join(texts)))
                 message_param.content = contents
pydantic_ai/models/openai.py CHANGED
@@ -222,6 +222,17 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
     `medium`, and `high`.
     """

+    openai_previous_response_id: Literal['auto'] | str
+    """The ID of a previous response from the model to use as the starting point for a continued conversation.
+
+    When set to `'auto'`, the request automatically uses the most recent
+    `provider_response_id` from the message history and omits earlier messages.
+
+    This enables the model to use server-side conversation state and faithfully reference previous reasoning.
+    See the [OpenAI Responses API documentation](https://platform.openai.com/docs/guides/reasoning#keeping-reasoning-items-in-context)
+    for more information.
+    """
+

 @dataclass(init=False)
 class OpenAIChatModel(Model):
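
A quick usage sketch of the new setting (not part of the diff; it assumes pydantic-ai >= 1.0.7 with the openai extra installed, an OPENAI_API_KEY in the environment, and an arbitrary example model name):

# Continue a conversation via OpenAI's server-side state instead of resending full history.
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

agent = Agent(OpenAIResponsesModel('gpt-4.1'))

# 'auto' picks the latest provider_response_id out of the supplied message
# history and sends only the messages that came after it.
settings = OpenAIResponsesModelSettings(openai_previous_response_id='auto')

first = agent.run_sync('Pick a number between 1 and 10.')
second = agent.run_sync(
    'Now double it.',
    message_history=first.all_messages(),
    model_settings=settings,
)
print(second.output)
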
@@ -977,6 +988,10 @@ class OpenAIResponsesModel(Model):
         else:
             tool_choice = 'auto'

+        previous_response_id = model_settings.get('openai_previous_response_id')
+        if previous_response_id == 'auto':
+            previous_response_id, messages = self._get_previous_response_id_and_new_messages(messages)
+
         instructions, openai_messages = await self._map_messages(messages, model_settings)
         reasoning = self._get_reasoning(model_settings)

@@ -1027,6 +1042,7 @@ class OpenAIResponsesModel(Model):
                 truncation=model_settings.get('openai_truncation', NOT_GIVEN),
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
+                previous_response_id=previous_response_id,
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 text=text or NOT_GIVEN,
@@ -1092,6 +1108,28 @@ class OpenAIResponsesModel(Model):
             ),
         }

+    def _get_previous_response_id_and_new_messages(
+        self, messages: list[ModelMessage]
+    ) -> tuple[str | None, list[ModelMessage]]:
+        # When `openai_previous_response_id` is set to 'auto', the most recent
+        # `provider_response_id` from the message history is selected and all
+        # earlier messages are omitted. This allows the OpenAI SDK to reuse
+        # server-side history for efficiency. The returned tuple contains the
+        # `previous_response_id` (if found) and the trimmed list of messages.
+        previous_response_id = None
+        trimmed_messages: list[ModelMessage] = []
+        for m in reversed(messages):
+            if isinstance(m, ModelResponse) and m.provider_name == self.system:
+                previous_response_id = m.provider_response_id
+                break
+            else:
+                trimmed_messages.append(m)
+
+        if previous_response_id and trimmed_messages:
+            return previous_response_id, list(reversed(trimmed_messages))
+        else:
+            return None, messages
+
     async def _map_messages(  # noqa: C901
         self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
     ) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
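
To make the trimming rule concrete, here is a minimal standalone sketch that mirrors the helper's behaviour on plain tuples rather than pydantic-ai message objects (the tuple layout and all names below are illustrative only):

from typing import Optional

# Each history item is (kind, provider_response_id); kind is 'request' or 'openai-response'.
HistoryItem = tuple[str, Optional[str]]

def trim_history(history: list[HistoryItem]) -> tuple[Optional[str], list[HistoryItem]]:
    previous_response_id: Optional[str] = None
    newer: list[HistoryItem] = []
    for item in reversed(history):
        kind, response_id = item
        if kind == 'openai-response':
            previous_response_id = response_id  # may be None; the search stops either way
            break
        newer.append(item)
    if previous_response_id and newer:
        return previous_response_id, list(reversed(newer))
    return None, history

history = [
    ('request', None),
    ('openai-response', 'resp_1'),
    ('request', None),  # the unanswered follow-up question
]
print(trim_history(history))  # ('resp_1', [('request', None)])
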
@@ -1135,7 +1173,7 @@ class OpenAIResponsesModel(Model):
         reasoning_item: responses.ResponseReasoningItemParam | None = None
         for item in message.parts:
             if isinstance(item, TextPart):
-                if item.id and item.id.startswith('msg_'):
+                if item.id and message.provider_name == self.system:
                     if message_item is None or message_item['id'] != item.id:  # pragma: no branch
                         message_item = responses.ResponseOutputMessageParam(
                             role='assistant',
@@ -1164,26 +1202,33 @@ class OpenAIResponsesModel(Model):
             elif isinstance(item, ThinkingPart):
                 if (
                     item.id
-                    and item.provider_name == self.system
-                    and OpenAIModelProfile.from_profile(
-                        self.profile
-                    ).openai_supports_encrypted_reasoning_content
+                    and message.provider_name == self.system
                     and model_settings.get('openai_send_reasoning_ids', True)
                 ):
+                    signature: str | None = None
                     if (
-                        reasoning_item is None
-                        or reasoning_item['id'] != item.id
-                        and (item.signature or item.content)
+                        item.signature
+                        and item.provider_name == self.system
+                        and OpenAIModelProfile.from_profile(
+                            self.profile
+                        ).openai_supports_encrypted_reasoning_content
+                    ):
+                        signature = item.signature
+
+                    if (reasoning_item is None or reasoning_item['id'] != item.id) and (
+                        signature or item.content
                     ):  # pragma: no branch
                         reasoning_item = responses.ResponseReasoningItemParam(
                             id=item.id,
                             summary=[],
-                            encrypted_content=item.signature,
+                            encrypted_content=signature,
                             type='reasoning',
                         )
                         openai_messages.append(reasoning_item)

                     if item.content:
+                        # The check above guarantees that `reasoning_item` is not None
+                        assert reasoning_item is not None
                         reasoning_item['summary'] = [
                             *reasoning_item['summary'],
                             Summary(text=item.content, type='summary_text'),
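
One related knob: the mapping above only replays reasoning item IDs (and encrypted reasoning content, when the model profile supports it) while openai_send_reasoning_ids is left at its default of True. A minimal sketch of opting out, assuming pydantic-ai >= 1.0.7:

from pydantic_ai.models.openai import OpenAIResponsesModelSettings

# Don't send reasoning item IDs / encrypted reasoning content back on follow-up requests.
settings = OpenAIResponsesModelSettings(openai_send_reasoning_ids=False)
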
pydantic_ai/run.py CHANGED
@@ -48,7 +48,6 @@ class AgentRun(Generic[AgentDepsT, OutputDataT]):
             [
                 UserPromptNode(
                     user_prompt='What is the capital of France?',
-                    instructions=None,
                     instructions_functions=[],
                     system_prompts=(),
                     system_prompt_functions=[],
@@ -183,7 +182,6 @@ class AgentRun(Generic[AgentDepsT, OutputDataT]):
             [
                 UserPromptNode(
                     user_prompt='What is the capital of France?',
-                    instructions=None,
                     instructions_functions=[],
                     system_prompts=(),
                     system_prompt_functions=[],
pydantic_ai/tools.py CHANGED
@@ -255,6 +255,7 @@ class Tool(Generic[AgentDepsT]):
     strict: bool | None
     sequential: bool
     requires_approval: bool
+    metadata: dict[str, Any] | None
     function_schema: _function_schema.FunctionSchema
     """
     The base JSON schema for the tool's parameters.
@@ -277,6 +278,7 @@ class Tool(Generic[AgentDepsT]):
         strict: bool | None = None,
         sequential: bool = False,
         requires_approval: bool = False,
+        metadata: dict[str, Any] | None = None,
         function_schema: _function_schema.FunctionSchema | None = None,
     ):
         """Create a new tool instance.
@@ -332,6 +334,7 @@ class Tool(Generic[AgentDepsT]):
             sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
             requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
                 See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
+            metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization.
             function_schema: The function schema to use for the tool. If not provided, it will be generated.
         """
         self.function = function
@@ -352,6 +355,7 @@ class Tool(Generic[AgentDepsT]):
         self.strict = strict
         self.sequential = sequential
         self.requires_approval = requires_approval
+        self.metadata = metadata

     @classmethod
     def from_schema(
@@ -406,6 +410,7 @@ class Tool(Generic[AgentDepsT]):
             parameters_json_schema=self.function_schema.json_schema,
             strict=self.strict,
             sequential=self.sequential,
+            metadata=self.metadata,
         )

     async def prepare_tool_def(self, ctx: RunContext[AgentDepsT]) -> ToolDefinition | None:
@@ -488,6 +493,12 @@ class ToolDefinition:
     See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
     """

+    metadata: dict[str, Any] | None = None
+    """Tool metadata that can be set by the toolset this tool came from. It is not sent to the model, but can be used for filtering and tool behavior customization.
+
+    For MCP tools, this contains the `meta`, `annotations`, and `output_schema` fields from the tool definition.
+    """
+
     @property
     def defer(self) -> bool:
         """Whether calls to this tool will be deferred.
pydantic_ai/toolsets/function.py CHANGED
@@ -45,18 +45,21 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = [],
         *,
         max_retries: int = 1,
-        id: str | None = None,
         docstring_format: DocstringFormat = 'auto',
         require_parameter_descriptions: bool = False,
         schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
+        strict: bool | None = None,
+        sequential: bool = False,
+        requires_approval: bool = False,
+        metadata: dict[str, Any] | None = None,
+        id: str | None = None,
     ):
         """Build a new function toolset.

         Args:
             tools: The tools to add to the toolset.
             max_retries: The maximum number of retries for each tool during a run.
-            id: An optional unique ID for the toolset. A toolset needs to have an ID in order to be used in a durable execution environment like Temporal,
-                in which case the ID will be used to identify the toolset's activities within the workflow.
+                Applies to all tools, unless overridden when adding a tool.
             docstring_format: Format of tool docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat].
                 Defaults to `'auto'`, such that the format is inferred from the structure of the docstring.
                 Applies to all tools, unless overridden when adding a tool.
@@ -64,12 +67,27 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
                 Applies to all tools, unless overridden when adding a tool.
             schema_generator: The JSON schema generator class to use for this tool. Defaults to `GenerateToolJsonSchema`.
                 Applies to all tools, unless overridden when adding a tool.
+            strict: Whether to enforce JSON schema compliance (only affects OpenAI).
+                See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
+            sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
+                Applies to all tools, unless overridden when adding a tool.
+            requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
+                See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
+                Applies to all tools, unless overridden when adding a tool.
+            metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization.
+                Applies to all tools, unless overridden when adding a tool, which will be merged with the toolset's metadata.
+            id: An optional unique ID for the toolset. A toolset needs to have an ID in order to be used in a durable execution environment like Temporal,
+                in which case the ID will be used to identify the toolset's activities within the workflow.
         """
         self.max_retries = max_retries
         self._id = id
         self.docstring_format = docstring_format
         self.require_parameter_descriptions = require_parameter_descriptions
         self.schema_generator = schema_generator
+        self.strict = strict
+        self.sequential = sequential
+        self.requires_approval = requires_approval
+        self.metadata = metadata

         self.tools = {}
         for tool in tools:
@@ -97,8 +115,9 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
         require_parameter_descriptions: bool | None = None,
         schema_generator: type[GenerateJsonSchema] | None = None,
         strict: bool | None = None,
-        sequential: bool = False,
-        requires_approval: bool = False,
+        sequential: bool | None = None,
+        requires_approval: bool | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> Callable[[ToolFuncEither[AgentDepsT, ToolParams]], ToolFuncEither[AgentDepsT, ToolParams]]: ...

     def tool(
@@ -113,8 +132,9 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
         require_parameter_descriptions: bool | None = None,
         schema_generator: type[GenerateJsonSchema] | None = None,
         strict: bool | None = None,
-        sequential: bool = False,
-        requires_approval: bool = False,
+        sequential: bool | None = None,
+        requires_approval: bool | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> Any:
         """Decorator to register a tool function which takes [`RunContext`][pydantic_ai.tools.RunContext] as its first argument.

@@ -163,9 +183,14 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
                 If `None`, the default value is determined by the toolset.
             strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                 See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
+                If `None`, the default value is determined by the toolset.
             sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
+                If `None`, the default value is determined by the toolset.
             requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
                 See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
+                If `None`, the default value is determined by the toolset.
+            metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization.
+                If `None`, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata.
         """

         def tool_decorator(
@@ -184,6 +209,7 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
                 strict,
                 sequential,
                 requires_approval,
+                metadata,
             )
             return func_

@@ -200,8 +226,9 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
         require_parameter_descriptions: bool | None = None,
         schema_generator: type[GenerateJsonSchema] | None = None,
         strict: bool | None = None,
-        sequential: bool = False,
-        requires_approval: bool = False,
+        sequential: bool | None = None,
+        requires_approval: bool | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         """Add a function as a tool to the toolset.

@@ -227,9 +254,14 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
                 If `None`, the default value is determined by the toolset.
             strict: Whether to enforce JSON schema compliance (only affects OpenAI).
                 See [`ToolDefinition`][pydantic_ai.tools.ToolDefinition] for more info.
+                If `None`, the default value is determined by the toolset.
             sequential: Whether the function requires a sequential/serial execution environment. Defaults to False.
+                If `None`, the default value is determined by the toolset.
             requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False.
                 See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info.
+                If `None`, the default value is determined by the toolset.
+            metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization.
+                If `None`, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata.
         """
         if docstring_format is None:
             docstring_format = self.docstring_format
@@ -237,6 +269,12 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
             require_parameter_descriptions = self.require_parameter_descriptions
         if schema_generator is None:
             schema_generator = self.schema_generator
+        if strict is None:
+            strict = self.strict
+        if sequential is None:
+            sequential = self.sequential
+        if requires_approval is None:
+            requires_approval = self.requires_approval

         tool = Tool[AgentDepsT](
             func,
@@ -250,6 +288,7 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
             strict=strict,
             sequential=sequential,
             requires_approval=requires_approval,
+            metadata=metadata,
         )
         self.add_tool(tool)

@@ -263,6 +302,8 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
             raise UserError(f'Tool name conflicts with existing tool: {tool.name!r}')
         if tool.max_retries is None:
             tool.max_retries = self.max_retries
+        if self.metadata is not None:
+            tool.metadata = self.metadata | (tool.metadata or {})
         self.tools[tool.name] = tool

     async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
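
A sketch of the new toolset-level defaults and the metadata merge performed in add_tool (assumes pydantic-ai >= 1.0.7; the names are illustrative):

from pydantic_ai.toolsets import FunctionToolset

admin_tools = FunctionToolset(
    requires_approval=True,                # applies to every tool unless overridden
    metadata={'source': 'admin-toolset'},  # merged into each tool's own metadata
)

@admin_tools.tool(metadata={'danger': 'high'})
def delete_user(user_id: str) -> str:
    """Delete a user account (stubbed)."""
    return f'deleted {user_id}'

tool = admin_tools.tools['delete_user']
print(tool.requires_approval)  # True, inherited from the toolset
print(tool.metadata)           # {'source': 'admin-toolset', 'danger': 'high'}
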
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.5
+Version: 1.0.7
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.5
+Requires-Dist: pydantic-graph==1.0.7
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -51,13 +51,13 @@ Requires-Dist: prompt-toolkit>=3; extra == 'cli'
 Requires-Dist: pyperclip>=1.9.0; extra == 'cli'
 Requires-Dist: rich>=13; extra == 'cli'
 Provides-Extra: cohere
-Requires-Dist: cohere>=5.17.0; (platform_system != 'Emscripten') and extra == 'cohere'
+Requires-Dist: cohere>=5.18.0; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: dbos
 Requires-Dist: dbos>=1.13.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.5; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.7; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
@@ -1,7 +1,7 @@
 pydantic_ai/__init__.py,sha256=CfqGPSjKlDl5iw1L48HbELsDuzxIzBFnFnovI_GcFWA,2083
 pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
 pydantic_ai/_a2a.py,sha256=2Hopcyl6o6U91eVkd7iAbEPYA5f0hJb8A5_fwMC0UfM,12168
-pydantic_ai/_agent_graph.py,sha256=dO2RClUNQwLeKIpwB3JsbNYy6KNx2eXCrf4BIJ7p0qo,47622
+pydantic_ai/_agent_graph.py,sha256=OFH5mbuDDIjE3T55XTBpJmjLPLH4PFRSgoXjXDYgou8,50884
 pydantic_ai/_cli.py,sha256=C-Uvbdx9wWnNqZKHN_r8d4mGte_aIPikOkKrTPvdrN8,14057
 pydantic_ai/_function_schema.py,sha256=olbmUMQoQV5qKV4j0-cOnhcTINz4uYyeDqMyusrFRtY,11234
 pydantic_ai/_griffe.py,sha256=BphvTL00FHxsSY56GM-bNyCOdwrpL0T3LbDQITWUK_Q,5280
@@ -14,48 +14,48 @@ pydantic_ai/_system_prompt.py,sha256=WdDW_DTGHujcFFaK-J7J6mA4ZDJZ0IOKpyizJA-1Y5Q
 pydantic_ai/_thinking_part.py,sha256=x80-Vkon16GOyq3W6f2qzafTVPC5dCgF7QD3k8ZMmYU,1304
 pydantic_ai/_tool_manager.py,sha256=hB_QzVxnGEbB7ZT2UUDeKeLm_Cv0F-0oCPwInxR-8NE,10369
 pydantic_ai/_utils.py,sha256=xa2PoAcTN-oXhfXOONOighmue-jtSv668o9Fu_IdO0A,16062
-pydantic_ai/ag_ui.py,sha256=Pp-R6XeHip1oQ6_jqV79JyE4TMQ0VOwb99pHxoGdsuU,28911
-pydantic_ai/builtin_tools.py,sha256=t0wa6KsgDCRoZMKJKRzRDyxaz1X4mDWMHlGjQmqFLdg,3222
+pydantic_ai/ag_ui.py,sha256=ZR-LuuaydrBLmwfCNHlClv2F9aWYe0wCka4IooTQkf0,29249
+pydantic_ai/builtin_tools.py,sha256=l4GLWM54yXa1lqBM-o2WMiTx51nhMZRPS7ufleEn474,3301
 pydantic_ai/direct.py,sha256=zMsz6poVgEq7t7L_8FWM6hmKdqTzjyQYL5xzQt_59Us,14951
 pydantic_ai/exceptions.py,sha256=zsXZMKf2BJuVsfuHl1fWTkogLU37bd4yq7D6BKHAzVs,4968
 pydantic_ai/format_prompt.py,sha256=37imBG2Fgpn-_RfAFalOX8Xc_XpGH2gY9tnhJDvxfk8,4243
-pydantic_ai/mcp.py,sha256=cmgi3Nq_qe1cTqs-R92WMfZw6bwjSqy2R6NiR7isPcQ,30364
+pydantic_ai/mcp.py,sha256=N1X5zldNeNJmH9EHnccLxXU4Pw7tBCdxFJzzbTOVAnE,34778
 pydantic_ai/messages.py,sha256=bq9Ps-CsYkXdkq4eu1gmIoiLiYsFTwEzB4fXUF_neic,55865
 pydantic_ai/output.py,sha256=wzNgVKJgxyXtSH-uNbRxIaUNLidxlQcwWYT2o1gY2hE,12037
 pydantic_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/result.py,sha256=FrJbd0nwaRVIxGH_EhV-ITQvrrd-JaDya9EDsE5-Pps,25389
 pydantic_ai/retries.py,sha256=QM4oDA9DG-Y2qP06fbCp8Dqq8ups40Rr4HYjAOlbNyM,14650
-pydantic_ai/run.py,sha256=qpTu2Q2O3lvcQAZREuIpyL0vQN13AvW99SwD7Oe9hKc,15175
+pydantic_ai/run.py,sha256=wHlWl4CXIHLcgo2R8PlsU3Pjn0vuMLFfP8D6Fbany-Y,15097
 pydantic_ai/settings.py,sha256=0mr6KudxKKjTG8e3nsv_8vDLxNhu_1-WvefCOzCGSYM,3565
-pydantic_ai/tools.py,sha256=McOPcViIGCjJzoyoipUTrttPPRMsIkbbm9ike8PaS8c,19712
+pydantic_ai/tools.py,sha256=dCecmJtRkF1ioqFYbfT00XGGqzGB4PPO9n6IrHCQtnc,20343
 pydantic_ai/usage.py,sha256=UoSOwhH-NTAeXl7Tq8GWXcW82m8zQLQvThvQehEx08g,14070
-pydantic_ai/agent/__init__.py,sha256=WES-ZG7s7QQOOBIMN8fE9nXRTt_zD_M0WFsgwuz1O7c,62499
-pydantic_ai/agent/abstract.py,sha256=h1e9cWv2N9qsXWhkJmJW-LJehZu3bFFOsfzRH6o7O84,44551
-pydantic_ai/agent/wrapper.py,sha256=--IJo8Yb-2uzcCBSIB9oB_9FQ1R7yZYkWnLSq0iUExs,9464
+pydantic_ai/agent/__init__.py,sha256=YM-LCruUM4o6F1c1kE-UCpJWzonKFPxgnVWls6bnUXI,62610
+pydantic_ai/agent/abstract.py,sha256=fL2nD5XgLHfmva6t-foBENpLHV_WYTUWLGBKU-l8stM,44622
+pydantic_ai/agent/wrapper.py,sha256=lx0NcM8MX_MoNm0oiPFDH2Cod78N5ONcerKcpJQeJes,9425
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/common_tools/duckduckgo.py,sha256=cJd-BUg-i50E0QjKveRCndGlU5GdvLq9UgNNJ18VQIQ,2263
 pydantic_ai/common_tools/tavily.py,sha256=Q1xxSF5HtXAaZ10Pp-OaDOHXwJf2mco9wScGEQXD7E4,2495
 pydantic_ai/durable_exec/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/durable_exec/dbos/__init__.py,sha256=H_dT0ERuNCBP0Im8eVGl8F9h7E9Aj87-pvmnLpDelF0,199
-pydantic_ai/durable_exec/dbos/_agent.py,sha256=IsGDvqQ4Jc5QPTNi9nVEKDc96b8p6SKYo80Yz9CjdhI,32499
-pydantic_ai/durable_exec/dbos/_mcp_server.py,sha256=XeDeBnyNwonPMZaSd2IMDf3ye0CTgYO9b9Vh8ZoeT6A,3009
+pydantic_ai/durable_exec/dbos/_agent.py,sha256=C03G7Sy_aS68GDr8gB3tgEnxBH6fkfTSEz0UXI4gfh4,32584
+pydantic_ai/durable_exec/dbos/_mcp_server.py,sha256=8RsDGqAluVOiGPuSl37XjaOEZvdN0LfwuqqZc7o4foE,3047
 pydantic_ai/durable_exec/dbos/_model.py,sha256=XNGLKJ5a9znUWnMSIA0SzxvQmUJREWzcVDrnfdjMoac,5115
 pydantic_ai/durable_exec/dbos/_utils.py,sha256=_aNceFvTcNeqb78sTDYM2TdYph85tbdeLueyXY1lbTA,242
 pydantic_ai/durable_exec/temporal/__init__.py,sha256=XKwy68wfgmjr057nolRwGHTKiadxufpQEGEUprAV09k,5563
-pydantic_ai/durable_exec/temporal/_agent.py,sha256=M2i-uKxlbneHMmSgABwpB5dMzM3SEyhVRds3A-DQ-Jw,36869
+pydantic_ai/durable_exec/temporal/_agent.py,sha256=9X1cPjQ72lihmoeOvmcPsYOQ18S-5qlHPOanH97vKZ0,36830
 pydantic_ai/durable_exec/temporal/_function_toolset.py,sha256=Hnfz3ukOqgq8j3h9u97S-fMfq4un1HZA4kxN2irWD_0,5562
-pydantic_ai/durable_exec/temporal/_logfire.py,sha256=5bSiOt-jihQATJsg-jrGmEqP3RWW_Sz6c2aicjt03lI,2009
+pydantic_ai/durable_exec/temporal/_logfire.py,sha256=ASd7vb0cd61yESI0mgU2w9SCGxsOegz95HtQjKdlQkE,2472
 pydantic_ai/durable_exec/temporal/_mcp_server.py,sha256=VFvHPVhvYz-ITGaXXNyuWwB8tsdF3Hg9rs7gss8TKWY,6032
 pydantic_ai/durable_exec/temporal/_model.py,sha256=cFHrk-yM65d41TPiWp5hwZEqXBNO6lNMtcU587j1b58,6765
 pydantic_ai/durable_exec/temporal/_run_context.py,sha256=IMLEW4AqHklumLiRBUTW-ogJGiH_tX3nCrFrxD7CbFw,2390
-pydantic_ai/durable_exec/temporal/_toolset.py,sha256=HxmQ5vut7Zd5eyrC27eNNn5_CHA_4-yJL_Pk8cKZSOs,2892
+pydantic_ai/durable_exec/temporal/_toolset.py,sha256=bnMbmR8JmBjBeWGaAMtgWP9Kb930nu0jzLS1T5Z9VEU,2978
 pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/ext/aci.py,sha256=sUllKDNO-LOMurbFgxwRHuzNlBkSa3aVBqXfEm-A_vo,2545
 pydantic_ai/ext/langchain.py,sha256=iLVEZv1kcLkdIHo3us2yfdi0kVqyJ6qTaCt9BoLWm4k,2335
 pydantic_ai/models/__init__.py,sha256=na9M98DMJ0VpsYhcJ9WI80EI0278XJEJ9jIE_hlW6q4,36256
 pydantic_ai/models/anthropic.py,sha256=-dH4qYSRlRD1XiC1wR89oGHKnFTjxP8zQh0scQDkTCk,32768
 pydantic_ai/models/bedrock.py,sha256=wHo65QNEsfsb1UaUv_TpvJ0WrgFoKoegB6I3eDVnORI,33393
-pydantic_ai/models/cohere.py,sha256=Oly6wpw7Xj0z-690foknLK2z2F9ukDOjxQCGBFFJKkk,13898
+pydantic_ai/models/cohere.py,sha256=uQLynz-zWciZBHuvkm8HxJyTOee1bs3pSka-x-56a98,13668
 pydantic_ai/models/fallback.py,sha256=XJ74wRxVT4dF0uewHH3is9I-zcLBK8KFIhpK3BB6mRw,5526
 pydantic_ai/models/function.py,sha256=9ZuRDQXChiA_S3a_M9tmmYQwlyuUEFZ20aYrnPqdTz8,14599
 pydantic_ai/models/gemini.py,sha256=DYEaOnwGmo9FUGVkRRrydGuQwYhnO-Cq5grTurLWgb4,39376
@@ -65,7 +65,7 @@ pydantic_ai/models/huggingface.py,sha256=f1tZObCJkcbiUCwNoPyuiaRaGYuj0GBFmbA8yFd
 pydantic_ai/models/instrumented.py,sha256=DCnyG7HXgV-W2EWac8oZb2A8PL8yarXeU7Rt97l4w_s,21421
 pydantic_ai/models/mcp_sampling.py,sha256=qnLCO3CB5bNQ86SpWRA-CSSOVcCCLPwjHtcNFvW9wHs,3461
 pydantic_ai/models/mistral.py,sha256=ru8EHwFS0xZBN6s1tlssUdjxjQyjB9L_8kFH7qq5U_g,33654
-pydantic_ai/models/openai.py,sha256=AsAqJIjTeN2d-WX0wPLM9phbEZHoR8PvWkfplRWUp7U,74021
+pydantic_ai/models/openai.py,sha256=r5hTJIjwxDc9HMEZt44CA8JZ07YFRpgPFYm5VHoH3ak,76377
 pydantic_ai/models/test.py,sha256=1kBwi7pSUt9_K1U-hokOilplxJWPQ3KRKH_s8bYmt_s,19969
 pydantic_ai/models/wrapper.py,sha256=9MeHW7mXPsEK03IKL0rtjeX6QgXyZROOOzLh72GiX2k,2148
 pydantic_ai/profiles/__init__.py,sha256=V6uGAVJuIaYRuZOQjkdIyFfDKD5py18RC98njnHOFug,3293
@@ -115,13 +115,13 @@ pydantic_ai/toolsets/approval_required.py,sha256=zyYGEx2VqprLed16OXg1QWr81rnAB0C
 pydantic_ai/toolsets/combined.py,sha256=LQzm_g6gskiHRUMFDvm88SSrz8OGxbdxyHiKzQrMBNU,4026
 pydantic_ai/toolsets/external.py,sha256=J9mWQm1HLbRCOJwpLBIvUZZGR_ywSB7pz8MrXkRNBoU,1736
 pydantic_ai/toolsets/filtered.py,sha256=PSQG9EbBYJpHUEBb_4TGzhjAcQPo5aPKvTuReeoWYtQ,864
-pydantic_ai/toolsets/function.py,sha256=v7KDeTaJhpeF58Wyvgtwji5m5_ejFb1e538T8w-0Qfo,13304
+pydantic_ai/toolsets/function.py,sha256=6QQclnwbelLXXdTGnS4JuU21J_nKHdklgOscL0QGDuQ,16203
 pydantic_ai/toolsets/prefixed.py,sha256=0KwcDkW8OM36ZUsOLVP5h-Nj2tPq78L3_E2c-1Fbh5s,1426
 pydantic_ai/toolsets/prepared.py,sha256=Zjfz6S8In6PBVxoKFN9sKPN984zO6t0awB7Lnq5KODw,1431
 pydantic_ai/toolsets/renamed.py,sha256=JuLHpi-hYPiSPlaTpN8WiXLiGsywYK0axi2lW2Qs75k,1637
 pydantic_ai/toolsets/wrapper.py,sha256=KRzF1p8dncHbva8CE6Ud-IC5E_aygIHlwH5atXK55k4,1673
-pydantic_ai_slim-1.0.5.dist-info/METADATA,sha256=Q3hdkuEKOT2YbEgzFhTBzofRVHl_wZZMFc5nXCgsTEY,4627
-pydantic_ai_slim-1.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydantic_ai_slim-1.0.5.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
-pydantic_ai_slim-1.0.5.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
-pydantic_ai_slim-1.0.5.dist-info/RECORD,,
+pydantic_ai_slim-1.0.7.dist-info/METADATA,sha256=D6apX2BLI7fr9hKpuHvEAMmq7ryLi4jvsneWj3hA_MQ,4627
+pydantic_ai_slim-1.0.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-1.0.7.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-1.0.7.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-1.0.7.dist-info/RECORD,,