pydantic-ai-slim 1.3.0__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydantic_ai/__init__.py +2 -0
- pydantic_ai/_agent_graph.py +10 -2
- pydantic_ai/agent/__init__.py +3 -11
- pydantic_ai/builtin_tools.py +71 -0
- pydantic_ai/models/__init__.py +11 -3
- pydantic_ai/models/anthropic.py +130 -9
- pydantic_ai/models/google.py +26 -14
- pydantic_ai/models/openai.py +190 -13
- {pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/METADATA +3 -3
- {pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/RECORD +13 -13
- {pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/__init__.py
CHANGED
@@ -12,6 +12,7 @@ from .agent import (
 from .builtin_tools import (
     CodeExecutionTool,
     ImageGenerationTool,
+    MCPServerTool,
     MemoryTool,
     UrlContextTool,
     WebSearchTool,
@@ -213,6 +214,7 @@ __all__ = (
     'CodeExecutionTool',
     'ImageGenerationTool',
     'MemoryTool',
+    'MCPServerTool',
     # output
     'ToolOutput',
     'NativeOutput',
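For orientation, the headline addition of this release is that `MCPServerTool` is now exported from the package root. A minimal sketch (the server id and URL are placeholders):

from pydantic_ai import MCPServerTool

tool = MCPServerTool(id='docs', url='https://example.com/mcp')
print(tool.unique_id)  # 'mcp_server:docs', per the unique_id property added in builtin_tools.py below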
pydantic_ai/_agent_graph.py
CHANGED
@@ -588,7 +588,11 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
             # as the empty response and request will not create any items in the API payload,
             # in the hope the model will return a non-empty response this time.
             ctx.state.increment_retries(ctx.deps.max_result_retries, model_settings=ctx.deps.model_settings)
-            self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+            run_context = build_run_context(ctx)
+            instructions = await ctx.deps.get_instructions(run_context)
+            self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
+                _messages.ModelRequest(parts=[], instructions=instructions)
+            )
             return

         text = ''
@@ -652,7 +656,11 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 ctx.state.increment_retries(
                     ctx.deps.max_result_retries, error=e, model_settings=ctx.deps.model_settings
                 )
-                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
+                run_context = build_run_context(ctx)
+                instructions = await ctx.deps.get_instructions(run_context)
+                self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
+                    _messages.ModelRequest(parts=[e.tool_retry], instructions=instructions)
+                )

         self._events_iterator = _run_stream()
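The change above affects retries triggered by an empty model response or a failed output validation: the follow-up ModelRequest is now rebuilt with the agent's current instructions instead of an instruction-less request. A minimal sketch of a run that exercises this path (model name and validator are illustrative):

from pydantic_ai import Agent, ModelRetry

agent = Agent('openai:gpt-5', instructions='Reply with a single lowercase word.')

@agent.output_validator
def check_lowercase(output: str) -> str:
    if output != output.lower():
        # The retry request built in CallToolsNode now carries `instructions`
        # again, so the model still sees 'Reply with a single lowercase word.'
        raise ModelRetry('Use lowercase only.')
    return output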
pydantic_ai/agent/__init__.py
CHANGED
@@ -542,6 +542,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         """
         if infer_name and self.name is None:
             self._infer_name(inspect.currentframe())
+
         model_used = self._get_model(model)
         del model

@@ -607,16 +608,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
         else:
             instrumentation_settings = None
             tracer = NoOpTracer()
-        if builtin_tools:
-            # Deduplicate builtin tools passed to the agent and the run based on type
-            builtin_tools = list(
-                {
-                    **({type(tool): tool for tool in self._builtin_tools or []}),
-                    **({type(tool): tool for tool in builtin_tools}),
-                }.values()
-            )
-        else:
-            builtin_tools = list(self._builtin_tools)
+
         graph_deps = _agent_graph.GraphAgentDeps[AgentDepsT, RunOutputDataT](
             user_deps=deps,
             prompt=user_prompt,
@@ -629,7 +621,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
             output_schema=output_schema,
             output_validators=output_validators,
             history_processors=self.history_processors,
-            builtin_tools=builtin_tools,
+            builtin_tools=[*self._builtin_tools, *(builtin_tools or [])],
             tool_manager=tool_manager,
             tracer=tracer,
             get_instructions=get_instructions,
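With deduplication removed here, agent-level and run-level builtin tools are simply concatenated and handed to the model; the actual dedup now happens in Model.prepare_request (see pydantic_ai/models/__init__.py below). A sketch, assuming a run-level builtin_tools argument as in the signature above:

from pydantic_ai import Agent, CodeExecutionTool, WebSearchTool

agent = Agent('anthropic:claude-sonnet-4-5', builtin_tools=[WebSearchTool()])

# Both tools reach the model; passing a second WebSearchTool() here would be
# dropped later by the unique_id-based dedup in Model.prepare_request.
result = agent.run_sync('Find the population of Tokyo and square it.', builtin_tools=[CodeExecutionTool()])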
pydantic_ai/builtin_tools.py
CHANGED
@@ -16,6 +16,7 @@ __all__ = (
     'UrlContextTool',
     'ImageGenerationTool',
     'MemoryTool',
+    'MCPServerTool',
 )

 _BUILTIN_TOOL_TYPES: dict[str, type[AbstractBuiltinTool]] = {}
@@ -33,6 +34,14 @@ class AbstractBuiltinTool(ABC):
     kind: str = 'unknown_builtin_tool'
     """Built-in tool identifier, this should be available on all built-in tools as a discriminator."""

+    @property
+    def unique_id(self) -> str:
+        """A unique identifier for the builtin tool.
+
+        If multiple instances of the same builtin tool can be passed to the model, subclasses should override this property to allow them to be distinguished.
+        """
+        return self.kind
+
     def __init_subclass__(cls, **kwargs: Any) -> None:
         super().__init_subclass__(**kwargs)
         _BUILTIN_TOOL_TYPES[cls.kind] = cls
@@ -263,6 +272,68 @@ class MemoryTool(AbstractBuiltinTool):
     """The kind of tool."""


+@dataclass(kw_only=True)
+class MCPServerTool(AbstractBuiltinTool):
+    """A builtin tool that allows your agent to use MCP servers.
+
+    Supported by:
+
+    * OpenAI Responses
+    * Anthropic
+    """
+
+    id: str
+    """A unique identifier for the MCP server."""
+
+    url: str
+    """The URL of the MCP server to use.
+
+    For OpenAI Responses, it is possible to use a `connector_id` by providing it as `x-openai-connector:<connector_id>`.
+    """
+
+    authorization_token: str | None = None
+    """Authorization header to use when making requests to the MCP server.
+
+    Supported by:
+
+    * OpenAI Responses
+    * Anthropic
+    """
+
+    description: str | None = None
+    """A description of the MCP server.
+
+    Supported by:
+
+    * OpenAI Responses
+    """
+
+    allowed_tools: list[str] | None = None
+    """A list of tools that the MCP server can use.
+
+    Supported by:
+
+    * OpenAI Responses
+    * Anthropic
+    """
+
+    headers: dict[str, str] | None = None
+    """Optional HTTP headers to send to the MCP server.
+
+    Use for authentication or other purposes.
+
+    Supported by:
+
+    * OpenAI Responses
+    """
+
+    kind: str = 'mcp_server'
+
+    @property
+    def unique_id(self) -> str:
+        return ':'.join([self.kind, self.id])
+
+
 def _tool_discriminator(tool_data: dict[str, Any] | AbstractBuiltinTool) -> str:
     if isinstance(tool_data, dict):
         return tool_data.get('kind', AbstractBuiltinTool.kind)
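A usage sketch for the new builtin tool (server details are placeholders). Since kind is 'mcp_server', unique_id resolves to 'mcp_server:<id>', so two servers with different ids can coexist on one agent:

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import MCPServerTool

deepwiki = MCPServerTool(
    id='deepwiki',
    url='https://mcp.deepwiki.com/mcp',
    allowed_tools=['ask_question'],  # optional allow-list, supported by both providers
)

agent = Agent('openai-responses:gpt-5', builtin_tools=[deepwiki])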
pydantic_ai/models/__init__.py
CHANGED
@@ -410,9 +410,17 @@ class Model(ABC):
         they need to customize the preparation flow further, but most implementations should simply call
         ``self.prepare_request(...)`` at the start of their ``request`` (and related) methods.
         """
-        model_settings = merge_model_settings(self.settings, model_settings)
-        model_request_parameters = self.customize_request_parameters(model_request_parameters)
-        return model_settings, model_request_parameters
+        model_settings = merge_model_settings(self.settings, model_settings)
+
+        if builtin_tools := model_request_parameters.builtin_tools:
+            # Deduplicate builtin tools
+            model_request_parameters = replace(
+                model_request_parameters,
+                builtin_tools=list({tool.unique_id: tool for tool in builtin_tools}.values()),
+            )
+
+        model_request_parameters = self.customize_request_parameters(model_request_parameters)
+        return model_settings, model_request_parameters

     @property
     @abstractmethod
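The dict comprehension keeps the last tool registered for each unique_id, so a run-level tool overrides an agent-level tool of the same identity while distinct MCP servers survive. A self-contained illustration of those semantics (URLs are placeholders):

from pydantic_ai.builtin_tools import MCPServerTool, WebSearchTool

tools = [
    WebSearchTool(),
    MCPServerTool(id='docs', url='https://example.com/mcp'),
    MCPServerTool(id='docs', url='https://example.com/mcp/v2'),  # same unique_id: replaces the line above
    MCPServerTool(id='code', url='https://example.com/code'),    # different unique_id: kept
]
deduped = list({tool.unique_id: tool for tool in tools}.values())
assert [t.unique_id for t in deduped] == ['web_search', 'mcp_server:docs', 'mcp_server:code']
assert deduped[1].url == 'https://example.com/mcp/v2'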
pydantic_ai/models/anthropic.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations as _annotations
 import io
 from collections.abc import AsyncGenerator, AsyncIterable, AsyncIterator
 from contextlib import asynccontextmanager
-from dataclasses import dataclass, field
+from dataclasses import dataclass, field, replace
 from datetime import datetime
 from typing import Any, Literal, cast, overload

@@ -13,7 +13,7 @@ from typing_extensions import assert_never
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
 from .._run_context import RunContext
 from .._utils import guard_tool_call_id as _guard_tool_call_id
-from ..builtin_tools import CodeExecutionTool, MemoryTool, WebSearchTool
+from ..builtin_tools import CodeExecutionTool, MCPServerTool, MemoryTool, WebSearchTool
 from ..exceptions import UserError
 from ..messages import (
     BinaryContent,
@@ -68,6 +68,9 @@ try:
     BetaContentBlockParam,
     BetaImageBlockParam,
     BetaInputJSONDelta,
+    BetaMCPToolResultBlock,
+    BetaMCPToolUseBlock,
+    BetaMCPToolUseBlockParam,
     BetaMemoryTool20250818Param,
     BetaMessage,
     BetaMessageParam,
@@ -82,6 +85,8 @@ try:
     BetaRawMessageStreamEvent,
     BetaRedactedThinkingBlock,
     BetaRedactedThinkingBlockParam,
+    BetaRequestMCPServerToolConfigurationParam,
+    BetaRequestMCPServerURLDefinitionParam,
     BetaServerToolUseBlock,
     BetaServerToolUseBlockParam,
     BetaSignatureDelta,
@@ -264,7 +269,7 @@ class AnthropicModel(Model):
     ) -> BetaMessage | AsyncStream[BetaRawMessageStreamEvent]:
         # standalone function to make it easier to override
         tools = self._get_tools(model_request_parameters)
-        tools, beta_features = self._add_builtin_tools(tools, model_request_parameters)
+        tools, mcp_servers, beta_features = self._add_builtin_tools(tools, model_request_parameters)

         tool_choice: BetaToolChoiceParam | None

@@ -300,6 +305,7 @@ class AnthropicModel(Model):
             model=self._model_name,
             tools=tools or OMIT,
             tool_choice=tool_choice or OMIT,
+            mcp_servers=mcp_servers or OMIT,
             stream=stream,
             thinking=model_settings.get('anthropic_thinking', OMIT),
             stop_sequences=model_settings.get('stop_sequences', OMIT),
@@ -318,11 +324,14 @@ class AnthropicModel(Model):
     def _process_response(self, response: BetaMessage) -> ModelResponse:
         """Process a non-streamed response, and prepare a message to return."""
         items: list[ModelResponsePart] = []
+        builtin_tool_calls: dict[str, BuiltinToolCallPart] = {}
         for item in response.content:
             if isinstance(item, BetaTextBlock):
                 items.append(TextPart(content=item.text))
             elif isinstance(item, BetaServerToolUseBlock):
-                items.append(_map_server_tool_use_block(item, self.system))
+                call_part = _map_server_tool_use_block(item, self.system)
+                builtin_tool_calls[call_part.tool_call_id] = call_part
+                items.append(call_part)
             elif isinstance(item, BetaWebSearchToolResultBlock):
                 items.append(_map_web_search_tool_result_block(item, self.system))
             elif isinstance(item, BetaCodeExecutionToolResultBlock):
@@ -333,6 +342,13 @@ class AnthropicModel(Model):
                 )
             elif isinstance(item, BetaThinkingBlock):
                 items.append(ThinkingPart(content=item.thinking, signature=item.signature, provider_name=self.system))
+            elif isinstance(item, BetaMCPToolUseBlock):
+                call_part = _map_mcp_server_use_block(item, self.system)
+                builtin_tool_calls[call_part.tool_call_id] = call_part
+                items.append(call_part)
+            elif isinstance(item, BetaMCPToolResultBlock):
+                call_part = builtin_tool_calls.get(item.tool_use_id)
+                items.append(_map_mcp_server_result_block(item, call_part, self.system))
             else:
                 assert isinstance(item, BetaToolUseBlock), f'unexpected item type {type(item)}'
                 items.append(
@@ -383,8 +399,9 @@ class AnthropicModel(Model):

     def _add_builtin_tools(
         self, tools: list[BetaToolUnionParam], model_request_parameters: ModelRequestParameters
-    ) -> tuple[list[BetaToolUnionParam], list[str]]:
+    ) -> tuple[list[BetaToolUnionParam], list[BetaRequestMCPServerURLDefinitionParam], list[str]]:
         beta_features: list[str] = []
+        mcp_servers: list[BetaRequestMCPServerURLDefinitionParam] = []
         for tool in model_request_parameters.builtin_tools:
             if isinstance(tool, WebSearchTool):
                 user_location = UserLocation(type='approximate', **tool.user_location) if tool.user_location else None
@@ -408,11 +425,26 @@ class AnthropicModel(Model):
                 tools = [tool for tool in tools if tool['name'] != 'memory']
                 tools.append(BetaMemoryTool20250818Param(name='memory', type='memory_20250818'))
                 beta_features.append('context-management-2025-06-27')
+            elif isinstance(tool, MCPServerTool) and tool.url:
+                mcp_server_url_definition_param = BetaRequestMCPServerURLDefinitionParam(
+                    type='url',
+                    name=tool.id,
+                    url=tool.url,
+                )
+                if tool.allowed_tools is not None:  # pragma: no branch
+                    mcp_server_url_definition_param['tool_configuration'] = BetaRequestMCPServerToolConfigurationParam(
+                        enabled=bool(tool.allowed_tools),
+                        allowed_tools=tool.allowed_tools,
+                    )
+                if tool.authorization_token:  # pragma: no cover
+                    mcp_server_url_definition_param['authorization_token'] = tool.authorization_token
+                mcp_servers.append(mcp_server_url_definition_param)
+                beta_features.append('mcp-client-2025-04-04')
             else:  # pragma: no cover
                 raise UserError(
                     f'`{tool.__class__.__name__}` is not supported by `AnthropicModel`. If it should be, please file an issue.'
                 )
-        return tools, beta_features
+        return tools, mcp_servers, beta_features

     async def _map_message(self, messages: list[ModelMessage]) -> tuple[str, list[BetaMessageParam]]:  # noqa: C901
         """Just maps a `pydantic_ai.Message` to a `anthropic.types.MessageParam`."""
@@ -458,6 +490,8 @@ class AnthropicModel(Model):
             | BetaCodeExecutionToolResultBlockParam
             | BetaThinkingBlockParam
             | BetaRedactedThinkingBlockParam
+            | BetaMCPToolUseBlockParam
+            | BetaMCPToolResultBlock
         ] = []
         for response_part in m.parts:
             if isinstance(response_part, TextPart):
@@ -508,7 +542,7 @@ class AnthropicModel(Model):
                             input=response_part.args_as_dict(),
                         )
                         assistant_content_params.append(server_tool_use_block_param)
-                    elif response_part.tool_name == CodeExecutionTool.kind:  # pragma: no branch
+                    elif response_part.tool_name == CodeExecutionTool.kind:
                         server_tool_use_block_param = BetaServerToolUseBlockParam(
                             id=tool_use_id,
                             type='server_tool_use',
@@ -516,6 +550,21 @@ class AnthropicModel(Model):
                             input=response_part.args_as_dict(),
                         )
                         assistant_content_params.append(server_tool_use_block_param)
+                    elif (
+                        response_part.tool_name.startswith(MCPServerTool.kind)
+                        and (server_id := response_part.tool_name.split(':', 1)[1])
+                        and (args := response_part.args_as_dict())
+                        and (tool_name := args.get('tool_name'))
+                        and (tool_args := args.get('tool_args'))
+                    ):  # pragma: no branch
+                        mcp_tool_use_block_param = BetaMCPToolUseBlockParam(
+                            id=tool_use_id,
+                            type='mcp_tool_use',
+                            server_name=server_id,
+                            name=tool_name,
+                            input=tool_args,
+                        )
+                        assistant_content_params.append(mcp_tool_use_block_param)
                 elif isinstance(response_part, BuiltinToolReturnPart):
                     if response_part.provider_name == self.system:
                         tool_use_id = _guard_tool_call_id(t=response_part)
@@ -547,6 +596,16 @@ class AnthropicModel(Model):
                                 ),
                             )
                         )
+                    elif response_part.tool_name.startswith(MCPServerTool.kind) and isinstance(
+                        response_part.content, dict
+                    ):  # pragma: no branch
+                        assistant_content_params.append(
+                            BetaMCPToolResultBlock(
+                                tool_use_id=tool_use_id,
+                                type='mcp_tool_result',
+                                **cast(dict[str, Any], response_part.content),  # pyright: ignore[reportUnknownMemberType]
+                            )
+                        )
                 elif isinstance(response_part, FilePart):  # pragma: no cover
                     # Files generated by models are not sent back to models that don't themselves generate files.
                     pass
@@ -661,6 +720,7 @@ class AnthropicStreamedResponse(StreamedResponse):
     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:  # noqa: C901
         current_block: BetaContentBlock | None = None

+        builtin_tool_calls: dict[str, BuiltinToolCallPart] = {}
         async for event in self._response:
             if isinstance(event, BetaRawMessageStartEvent):
                 self._usage = _map_usage(event, self._provider_name, self._provider_url, self._model_name)
@@ -698,9 +758,11 @@ class AnthropicStreamedResponse(StreamedResponse):
                     if maybe_event is not None:  # pragma: no branch
                         yield maybe_event
                 elif isinstance(current_block, BetaServerToolUseBlock):
+                    call_part = _map_server_tool_use_block(current_block, self.provider_name)
+                    builtin_tool_calls[call_part.tool_call_id] = call_part
                     yield self._parts_manager.handle_part(
                         vendor_part_id=event.index,
-                        part=_map_server_tool_use_block(current_block, self.provider_name),
+                        part=call_part,
                     )
                 elif isinstance(current_block, BetaWebSearchToolResultBlock):
                     yield self._parts_manager.handle_part(
@@ -712,6 +774,32 @@ class AnthropicStreamedResponse(StreamedResponse):
                         vendor_part_id=event.index,
                         part=_map_code_execution_tool_result_block(current_block, self.provider_name),
                     )
+                elif isinstance(current_block, BetaMCPToolUseBlock):
+                    call_part = _map_mcp_server_use_block(current_block, self.provider_name)
+                    builtin_tool_calls[call_part.tool_call_id] = call_part
+
+                    args_json = call_part.args_as_json_str()
+                    # Drop the final `{}}` so that we can add tool args deltas
+                    args_json_delta = args_json[:-3]
+                    assert args_json_delta.endswith('"tool_args":'), (
+                        f'Expected {args_json_delta!r} to end in `"tool_args":`'
+                    )
+
+                    yield self._parts_manager.handle_part(
+                        vendor_part_id=event.index, part=replace(call_part, args=None)
+                    )
+                    maybe_event = self._parts_manager.handle_tool_call_delta(
+                        vendor_part_id=event.index,
+                        args=args_json_delta,
+                    )
+                    if maybe_event is not None:  # pragma: no branch
+                        yield maybe_event
+                elif isinstance(current_block, BetaMCPToolResultBlock):
+                    call_part = builtin_tool_calls.get(current_block.tool_use_id)
+                    yield self._parts_manager.handle_part(
+                        vendor_part_id=event.index,
+                        part=_map_mcp_server_result_block(current_block, call_part, self.provider_name),
+                    )

             elif isinstance(event, BetaRawContentBlockDeltaEvent):
                 if isinstance(event.delta, BetaTextDelta):
@@ -749,7 +837,16 @@ class AnthropicStreamedResponse(StreamedResponse):
                     self.provider_details = {'finish_reason': raw_finish_reason}
                     self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason)

-            elif isinstance(event, BetaRawContentBlockStopEvent | BetaRawMessageStopEvent):  # pragma: no branch
+            elif isinstance(event, BetaRawContentBlockStopEvent):  # pragma: no branch
+                if isinstance(current_block, BetaMCPToolUseBlock):
+                    maybe_event = self._parts_manager.handle_tool_call_delta(
+                        vendor_part_id=event.index,
+                        args='}',
+                    )
+                    if maybe_event is not None:  # pragma: no branch
+                        yield maybe_event
+                current_block = None
+            elif isinstance(event, BetaRawMessageStopEvent):  # pragma: no branch
                 current_block = None

     @property
@@ -817,3 +914,27 @@ def _map_code_execution_tool_result_block(
         content=code_execution_tool_result_content_ta.dump_python(item.content, mode='json'),
         tool_call_id=item.tool_use_id,
     )
+
+
+def _map_mcp_server_use_block(item: BetaMCPToolUseBlock, provider_name: str) -> BuiltinToolCallPart:
+    return BuiltinToolCallPart(
+        provider_name=provider_name,
+        tool_name=':'.join([MCPServerTool.kind, item.server_name]),
+        args={
+            'action': 'call_tool',
+            'tool_name': item.name,
+            'tool_args': cast(dict[str, Any], item.input),
+        },
+        tool_call_id=item.id,
+    )
+
+
+def _map_mcp_server_result_block(
+    item: BetaMCPToolResultBlock, call_part: BuiltinToolCallPart | None, provider_name: str
+) -> BuiltinToolReturnPart:
+    return BuiltinToolReturnPart(
+        provider_name=provider_name,
+        tool_name=call_part.tool_name if call_part else MCPServerTool.kind,
+        content=item.model_dump(mode='json', include={'content', 'is_error'}),
+        tool_call_id=item.tool_use_id,
+    )
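On the Anthropic side, each MCPServerTool becomes an entry in the request's mcp_servers parameter (with the 'mcp-client-2025-04-04' beta flag), and server-side calls surface as BuiltinToolCallPart/BuiltinToolReturnPart pairs whose tool_name is 'mcp_server:<server id>'. A sketch with placeholder server details:

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import MCPServerTool

agent = Agent(
    'anthropic:claude-sonnet-4-5',
    builtin_tools=[
        MCPServerTool(
            id='internal-docs',
            url='https://mcp.example.com/mcp',
            authorization_token='example-token',  # forwarded as authorization_token
            allowed_tools=['search_docs'],        # forwarded as tool_configuration
        )
    ],
)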
pydantic_ai/models/google.py
CHANGED
@@ -126,6 +126,8 @@ _FINISH_REASON_MAP: dict[GoogleFinishReason, FinishReason | None] = {
     GoogleFinishReason.MALFORMED_FUNCTION_CALL: 'error',
     GoogleFinishReason.IMAGE_SAFETY: 'content_filter',
     GoogleFinishReason.UNEXPECTED_TOOL_CALL: 'error',
+    GoogleFinishReason.IMAGE_PROHIBITED_CONTENT: 'content_filter',
+    GoogleFinishReason.NO_IMAGE: 'error',
 }


@@ -453,23 +455,28 @@ class GoogleModel(Model):
     def _process_response(self, response: GenerateContentResponse) -> ModelResponse:
         if not response.candidates:
             raise UnexpectedModelBehavior('Expected at least one candidate in Gemini response')  # pragma: no cover
+
         candidate = response.candidates[0]
-        if candidate.content is None or candidate.content.parts is None:
-            if candidate.finish_reason == 'SAFETY':
-                raise UnexpectedModelBehavior('Safety settings triggered', str(response))
-            else:
-                raise UnexpectedModelBehavior(
-                    'Content field missing from Gemini response', str(response)
-                )  # pragma: no cover
-        parts = candidate.content.parts or []

         vendor_id = response.response_id
         vendor_details: dict[str, Any] | None = None
         finish_reason: FinishReason | None = None
-        if raw_finish_reason := candidate.finish_reason:  # pragma: no branch
+        raw_finish_reason = candidate.finish_reason
+        if raw_finish_reason:  # pragma: no branch
             vendor_details = {'finish_reason': raw_finish_reason.value}
             finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason)

+        if candidate.content is None or candidate.content.parts is None:
+            if finish_reason == 'content_filter' and raw_finish_reason:
+                raise UnexpectedModelBehavior(
+                    f'Content filter {raw_finish_reason.value!r} triggered', response.model_dump_json()
+                )
+            else:
+                raise UnexpectedModelBehavior(
+                    'Content field missing from Gemini response', response.model_dump_json()
+                )  # pragma: no cover
+        parts = candidate.content.parts or []
+
         usage = _metadata_as_usage(response)
         return _process_response_from_parts(
             parts,
@@ -623,7 +630,8 @@ class GeminiStreamedResponse(StreamedResponse):
             if chunk.response_id:  # pragma: no branch
                 self.provider_response_id = chunk.response_id

-            if raw_finish_reason := candidate.finish_reason:
+            raw_finish_reason = candidate.finish_reason
+            if raw_finish_reason:
                 self.provider_details = {'finish_reason': raw_finish_reason.value}
                 self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason)

@@ -641,13 +649,17 @@ class GeminiStreamedResponse(StreamedResponse):
             # )

             if candidate.content is None or candidate.content.parts is None:
-                if candidate.finish_reason == 'STOP':
+                if self.finish_reason == 'stop':  # pragma: no cover
                     # Normal completion - skip this chunk
                     continue
-                elif candidate.finish_reason == 'SAFETY':
-                    raise UnexpectedModelBehavior('Safety settings triggered', str(chunk))
+                elif self.finish_reason == 'content_filter' and raw_finish_reason:  # pragma: no cover
+                    raise UnexpectedModelBehavior(
+                        f'Content filter {raw_finish_reason.value!r} triggered', chunk.model_dump_json()
+                    )
                 else:  # pragma: no cover
-                    raise UnexpectedModelBehavior('Content field missing from streaming Gemini response', str(chunk))
+                    raise UnexpectedModelBehavior(
+                        'Content field missing from streaming Gemini response', chunk.model_dump_json()
+                    )

             parts = candidate.content.parts
             if not parts:
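Behaviorally, an empty candidate that was blocked now fails with the precise finish reason and the full response payload, rather than the old blanket 'Safety settings triggered' message. A sketch of what callers might observe (model name illustrative):

from pydantic_ai import Agent
from pydantic_ai.exceptions import UnexpectedModelBehavior

agent = Agent('google-gla:gemini-2.5-flash')

try:
    agent.run_sync('...')
except UnexpectedModelBehavior as e:
    # e.g. "Content filter 'IMAGE_PROHIBITED_CONTENT' triggered"; the second
    # argument (e.body) now holds response.model_dump_json() for debugging.
    print(e.message)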
pydantic_ai/models/openai.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations as _annotations

 import base64
+import json
 import warnings
 from collections.abc import AsyncIterable, AsyncIterator, Sequence
 from contextlib import asynccontextmanager
@@ -17,7 +18,7 @@ from .._output import DEFAULT_OUTPUT_TOOL_NAME, OutputObjectDefinition
 from .._run_context import RunContext
 from .._thinking_part import split_content_into_text_and_thinking
 from .._utils import guard_tool_call_id as _guard_tool_call_id, now_utc as _now_utc, number_to_datetime
-from ..builtin_tools import CodeExecutionTool, ImageGenerationTool, WebSearchTool
+from ..builtin_tools import CodeExecutionTool, ImageGenerationTool, MCPServerTool, WebSearchTool
 from ..exceptions import UserError
 from ..messages import (
     AudioUrl,
@@ -109,6 +110,11 @@ Using this more broad type for the model name instead of the ChatModel definition
 allows this model to be used more easily with other model types (ie, Ollama, Deepseek).
 """

+MCP_SERVER_TOOL_CONNECTOR_URI_SCHEME: Literal['x-openai-connector'] = 'x-openai-connector'
+"""
+Prefix for OpenAI connector IDs. OpenAI supports either a URL or a connector ID when passing MCP configuration to a model;
+by using that prefix like `x-openai-connector:<connector-id>` in a URL, you can pass a connector ID to a model.
+"""

 _CHAT_FINISH_REASON_MAP: dict[
     Literal['stop', 'length', 'tool_calls', 'content_filter', 'function_call'], FinishReason
@@ -1061,13 +1067,16 @@ class OpenAIResponsesModel(Model):
             elif isinstance(item, responses.ResponseFileSearchToolCall):  # pragma: no cover
                 # Pydantic AI doesn't yet support the FileSearch built-in tool
                 pass
-            elif isinstance(
-                item,
-                responses.response_output_item.McpCall
-                | responses.response_output_item.McpListTools
-                | responses.response_output_item.McpApprovalRequest,
-            ):  # pragma: no cover
-                # Pydantic AI doesn't yet support MCP built-in tools
+            elif isinstance(item, responses.response_output_item.McpCall):
+                call_part, return_part = _map_mcp_call(item, self.system)
+                items.append(call_part)
+                items.append(return_part)
+            elif isinstance(item, responses.response_output_item.McpListTools):
+                call_part, return_part = _map_mcp_list_tools(item, self.system)
+                items.append(call_part)
+                items.append(return_part)
+            elif isinstance(item, responses.response_output_item.McpApprovalRequest):  # pragma: no cover
+                # Pydantic AI doesn't yet support McpApprovalRequest (explicit tool usage approval)
                 pass

         finish_reason: FinishReason | None = None
@@ -1256,6 +1265,32 @@ class OpenAIResponsesModel(Model):
             elif isinstance(tool, CodeExecutionTool):
                 has_image_generating_tool = True
                 tools.append({'type': 'code_interpreter', 'container': {'type': 'auto'}})
+            elif isinstance(tool, MCPServerTool):
+                mcp_tool = responses.tool_param.Mcp(
+                    type='mcp',
+                    server_label=tool.id,
+                    require_approval='never',
+                )
+
+                if tool.authorization_token:  # pragma: no branch
+                    mcp_tool['authorization'] = tool.authorization_token
+
+                if tool.allowed_tools is not None:  # pragma: no branch
+                    mcp_tool['allowed_tools'] = tool.allowed_tools
+
+                if tool.description:  # pragma: no branch
+                    mcp_tool['server_description'] = tool.description
+
+                if tool.headers:  # pragma: no branch
+                    mcp_tool['headers'] = tool.headers
+
+                if tool.url.startswith(MCP_SERVER_TOOL_CONNECTOR_URI_SCHEME + ':'):
+                    _, connector_id = tool.url.split(':', maxsplit=1)
+                    mcp_tool['connector_id'] = connector_id  # pyright: ignore[reportGeneralTypeIssues]
+                else:
+                    mcp_tool['server_url'] = tool.url
+
+                tools.append(mcp_tool)
             elif isinstance(tool, ImageGenerationTool):  # pragma: no branch
                 has_image_generating_tool = True
                 tools.append(
@@ -1428,7 +1463,7 @@ class OpenAIResponsesModel(Model):
                             type='web_search_call',
                         )
                         openai_messages.append(web_search_item)
-                    elif item.tool_name == ImageGenerationTool.kind and item.tool_call_id:  # pragma: no branch
+                    elif item.tool_name == ImageGenerationTool.kind and item.tool_call_id:
                         # The cast is necessary because of https://github.com/openai/openai-python/issues/2648
                         image_generation_item = cast(
                             responses.response_input_item_param.ImageGenerationCall,
@@ -1438,6 +1473,37 @@ class OpenAIResponsesModel(Model):
                             },
                         )
                         openai_messages.append(image_generation_item)
+                    elif (  # pragma: no branch
+                        item.tool_name.startswith(MCPServerTool.kind)
+                        and item.tool_call_id
+                        and (server_id := item.tool_name.split(':', 1)[1])
+                        and (args := item.args_as_dict())
+                        and (action := args.get('action'))
+                    ):
+                        if action == 'list_tools':
+                            mcp_list_tools_item = responses.response_input_item_param.McpListTools(
+                                id=item.tool_call_id,
+                                type='mcp_list_tools',
+                                server_label=server_id,
+                                tools=[],  # These can be read server-side
+                            )
+                            openai_messages.append(mcp_list_tools_item)
+                        elif (  # pragma: no branch
+                            action == 'call_tool'
+                            and (tool_name := args.get('tool_name'))
+                            and (tool_args := args.get('tool_args'))
+                        ):
+                            mcp_call_item = responses.response_input_item_param.McpCall(
+                                id=item.tool_call_id,
+                                server_label=server_id,
+                                name=tool_name,
+                                arguments=to_json(tool_args).decode(),
+                                error=None,  # These can be read server-side
+                                output=None,  # These can be read server-side
+                                type='mcp_call',
+                            )
+                            openai_messages.append(mcp_call_item)

                 elif isinstance(item, BuiltinToolReturnPart):
                     if item.provider_name == self.system and send_item_ids:
                         if (
@@ -1456,9 +1522,12 @@ class OpenAIResponsesModel(Model):
                             and (status := content.get('status'))
                         ):
                             web_search_item['status'] = status
-                        elif item.tool_name == ImageGenerationTool.kind:  # pragma: no branch
+                        elif item.tool_name == ImageGenerationTool.kind:
                             # Image generation result does not need to be sent back, just the `id` off of `BuiltinToolCallPart`.
                             pass
+                        elif item.tool_name.startswith(MCPServerTool.kind):  # pragma: no branch
+                            # MCP call result does not need to be sent back, just the fields off of `BuiltinToolCallPart`.
+                            pass
                 elif isinstance(item, FilePart):
                     # This was generated by the `ImageGenerationTool` or `CodeExecutionTool`,
                     # and does not need to be sent back separately from the corresponding `BuiltinToolReturnPart`.
@@ -1772,7 +1841,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 args_json = call_part.args_as_json_str()
                 # Drop the final `"}` so that we can add code deltas
                 args_json_delta = args_json[:-2]
-                assert args_json_delta.endswith('code":"')
+                assert args_json_delta.endswith('"code":"'), f'Expected {args_json_delta!r} to end in `"code":"`'

                 yield self._parts_manager.handle_part(
                     vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
@@ -1786,7 +1855,28 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
             elif isinstance(chunk.item, responses.response_output_item.ImageGenerationCall):
                 call_part, _, _ = _map_image_generation_tool_call(chunk.item, self.provider_name)
                 yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-call', part=call_part)
+            elif isinstance(chunk.item, responses.response_output_item.McpCall):
+                call_part, _ = _map_mcp_call(chunk.item, self.provider_name)

+                args_json = call_part.args_as_json_str()
+                # Drop the final `{}}` so that we can add tool args deltas
+                args_json_delta = args_json[:-3]
+                assert args_json_delta.endswith('"tool_args":'), (
+                    f'Expected {args_json_delta!r} to end in `"tool_args":"`'
+                )
+
+                yield self._parts_manager.handle_part(
+                    vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
+                )
+                maybe_event = self._parts_manager.handle_tool_call_delta(
+                    vendor_part_id=f'{chunk.item.id}-call',
+                    args=args_json_delta,
+                )
+                if maybe_event is not None:  # pragma: no branch
+                    yield maybe_event
+            elif isinstance(chunk.item, responses.response_output_item.McpListTools):
+                call_part, _ = _map_mcp_list_tools(chunk.item, self.provider_name)
+                yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-call', part=call_part)
             else:
                 warnings.warn(  # pragma: no cover
                     f'Handling of this item type is not yet implemented. Please report on our GitHub: {chunk}',
@@ -1827,6 +1917,13 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-file', part=file_part)
                 yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part)

+            elif isinstance(chunk.item, responses.response_output_item.McpCall):
+                _, return_part = _map_mcp_call(chunk.item, self.provider_name)
+                yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part)
+            elif isinstance(chunk.item, responses.response_output_item.McpListTools):
+                _, return_part = _map_mcp_list_tools(chunk.item, self.provider_name)
+                yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part)
+
         elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
             yield self._parts_manager.handle_thinking_delta(
                 vendor_part_id=f'{chunk.item_id}-{chunk.summary_index}',
@@ -1921,6 +2018,40 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 )
                 yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item_id}-file', part=file_part)

+        elif isinstance(chunk, responses.ResponseMcpCallArgumentsDoneEvent):
+            maybe_event = self._parts_manager.handle_tool_call_delta(
+                vendor_part_id=f'{chunk.item_id}-call',
+                args='}',
+            )
+            if maybe_event is not None:  # pragma: no branch
+                yield maybe_event
+
+        elif isinstance(chunk, responses.ResponseMcpCallArgumentsDeltaEvent):
+            maybe_event = self._parts_manager.handle_tool_call_delta(
+                vendor_part_id=f'{chunk.item_id}-call',
+                args=chunk.delta,
+            )
+            if maybe_event is not None:  # pragma: no branch
+                yield maybe_event
+
+        elif isinstance(chunk, responses.ResponseMcpListToolsInProgressEvent):
+            pass  # there's nothing we need to do here
+
+        elif isinstance(chunk, responses.ResponseMcpListToolsCompletedEvent):
+            pass  # there's nothing we need to do here
+
+        elif isinstance(chunk, responses.ResponseMcpListToolsFailedEvent):  # pragma: no cover
+            pass  # there's nothing we need to do here
+
+        elif isinstance(chunk, responses.ResponseMcpCallInProgressEvent):
+            pass  # there's nothing we need to do here
+
+        elif isinstance(chunk, responses.ResponseMcpCallFailedEvent):  # pragma: no cover
+            pass  # there's nothing we need to do here
+
+        elif isinstance(chunk, responses.ResponseMcpCallCompletedEvent):
+            pass  # there's nothing we need to do here
+
         else:  # pragma: no cover
             warnings.warn(
                 f'Handling of this event type is not yet implemented. Please report on our GitHub: {chunk}',
@@ -1990,7 +2121,6 @@ def _map_usage(
 def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
     # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields.
     # Before our `ToolCallPart` gained the `id` field alongside `tool_call_id` field, we combined the two fields into a single string stored on `tool_call_id`.
-
     if '|' in combined_id:
         call_id, id = combined_id.split('|', 1)
         return call_id, id
@@ -2030,7 +2160,7 @@ def _map_code_interpreter_tool_call(
             tool_call_id=item.id,
             args={
                 'container_id': item.container_id,
-                'code': item.code,
+                'code': item.code or '',
             },
             provider_name=provider_name,
         ),
@@ -2122,3 +2252,50 @@ def _map_image_generation_tool_call(
         ),
         file_part,
     )
+
+
+def _map_mcp_list_tools(
+    item: responses.response_output_item.McpListTools, provider_name: str
+) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]:
+    tool_name = ':'.join([MCPServerTool.kind, item.server_label])
+    return (
+        BuiltinToolCallPart(
+            tool_name=tool_name,
+            tool_call_id=item.id,
+            provider_name=provider_name,
+            args={'action': 'list_tools'},
+        ),
+        BuiltinToolReturnPart(
+            tool_name=tool_name,
+            tool_call_id=item.id,
+            content=item.model_dump(mode='json', include={'tools', 'error'}),
+            provider_name=provider_name,
+        ),
+    )
+
+
+def _map_mcp_call(
+    item: responses.response_output_item.McpCall, provider_name: str
+) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]:
+    tool_name = ':'.join([MCPServerTool.kind, item.server_label])
+    return (
+        BuiltinToolCallPart(
+            tool_name=tool_name,
+            tool_call_id=item.id,
+            args={
+                'action': 'call_tool',
+                'tool_name': item.name,
+                'tool_args': json.loads(item.arguments) if item.arguments else {},
+            },
+            provider_name=provider_name,
+        ),
+        BuiltinToolReturnPart(
+            tool_name=tool_name,
+            tool_call_id=item.id,
+            content={
+                'output': item.output,
+                'error': item.error,
+            },
+            provider_name=provider_name,
+        ),
+    )
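For OpenAI Responses, the same MCPServerTool maps to a tools entry of type 'mcp'; the x-openai-connector URI scheme introduced above selects connector_id instead of server_url. A sketch with a placeholder connector ID and token:

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import MCPServerTool

gmail = MCPServerTool(
    id='gmail',
    url='x-openai-connector:connector_gmail',  # routed to mcp_tool['connector_id']
    authorization_token='oauth-access-token',  # routed to mcp_tool['authorization']
)

agent = Agent('openai-responses:gpt-5', builtin_tools=[gmail])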
{pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.3.0
+Version: 1.4.0
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.35
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.3.0
+Requires-Dist: pydantic-graph==1.4.0
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.3.0; extra == 'evals'
+Requires-Dist: pydantic-evals==1.4.0; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.46.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
-pydantic_ai/__init__.py,sha256=
+pydantic_ai/__init__.py,sha256=KAHapOSW1U5w9qiC5YDoe8e-YCS1aO5t0HDRrbICGXA,5239
 pydantic_ai/__main__.py,sha256=Q_zJU15DUA01YtlJ2mnaLCoId2YmgmreVEERGuQT-Y0,132
 pydantic_ai/_a2a.py,sha256=3_pl7JW2yHdu31qLgCrdcTZTqXaJNjAwUV6zavah_w8,12159
-pydantic_ai/_agent_graph.py,sha256=
+pydantic_ai/_agent_graph.py,sha256=U5MUfLpMRdNpHrvZ22eE_P0PoFx7uTX_AxxiZiGdu2U,56433
 pydantic_ai/_cli.py,sha256=iZTCFrpJy3aUZ49nJQ5nw2INFw6gPVQd8EhB0rahVcI,14005
 pydantic_ai/_function_schema.py,sha256=UnDGh7Wh5z70pEaRujXF_hKsSibQdN2ywI6lZGz3LUo,11663
 pydantic_ai/_griffe.py,sha256=BphvTL00FHxsSY56GM-bNyCOdwrpL0T3LbDQITWUK_Q,5280
@@ -17,7 +17,7 @@ pydantic_ai/_thinking_part.py,sha256=_0DajGyWPa50WUTPWN1UPfZw0xD8_hHcuSt0T3fgRr0
 pydantic_ai/_tool_manager.py,sha256=se5Fikg4HaiTOnxJ4LFrezktZ2Zfv9a2OH0V9PtFE54,10464
 pydantic_ai/_utils.py,sha256=TBzJ03szJPrmDdqRqKTyhRboTsyP6wppnCCprpZFBMw,16620
 pydantic_ai/ag_ui.py,sha256=X3b4P_IraypCE3r-L2ETIo8G951A1MDdP4P5TQ8Fces,32067
-pydantic_ai/builtin_tools.py,sha256=
+pydantic_ai/builtin_tools.py,sha256=EYSp9JVRethTLz-cL6HNrFRqnYaJMYBoDi-FTMcFf8c,8448
 pydantic_ai/direct.py,sha256=i5yZ9Tx8IiwXg6Nz9CW4-fyXzxnjP59fsklExCh5sjA,15111
 pydantic_ai/exceptions.py,sha256=oPwXgGMADfA59ehGYNOhfqL9LOlaV_QnYq-ojrogZfA,5136
 pydantic_ai/format_prompt.py,sha256=cLyWO8g77Y4JzqVSikqodXaAfTn6i-k206rNhYTiIsE,9710
@@ -31,7 +31,7 @@ pydantic_ai/run.py,sha256=dV3zIztC-lfOCKecXg_Mcx2CyOfUbxQC0JbZuPvQhTI,16227
 pydantic_ai/settings.py,sha256=0mr6KudxKKjTG8e3nsv_8vDLxNhu_1-WvefCOzCGSYM,3565
 pydantic_ai/tools.py,sha256=dCecmJtRkF1ioqFYbfT00XGGqzGB4PPO9n6IrHCQtnc,20343
 pydantic_ai/usage.py,sha256=lhReoVNwqt7mfmWk40A1ddnKk4-MVFJ0qCl_oFdGzxo,16251
-pydantic_ai/agent/__init__.py,sha256=
+pydantic_ai/agent/__init__.py,sha256=UTd9xNwUM5pPpnve0AGpOQ3WKT_FHJtfAO08wkFusKQ,66697
 pydantic_ai/agent/abstract.py,sha256=Akq1NvfzXbIEJwwvo_t-FQ6MobW_cPWSeUXffdUN7Og,55651
 pydantic_ai/agent/wrapper.py,sha256=ygwfMq24mGe3pGIK-TtPAy3cV7M8VZJW3ulEHvwNTck,10293
 pydantic_ai/common_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -62,20 +62,20 @@ pydantic_ai/durable_exec/temporal/_toolset.py,sha256=IlPQrumm2MpZrb518ru15s0jIl8
 pydantic_ai/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pydantic_ai/ext/aci.py,sha256=YWYLXzTQJ6hS7qfgNycA8cRl69gogGgThqEU6II7eMA,2527
 pydantic_ai/ext/langchain.py,sha256=kmbbV3Cx2BiNYEJCZMHVYQquUQD-zG2L_bwDangy0Ww,2317
-pydantic_ai/models/__init__.py,sha256=
-pydantic_ai/models/anthropic.py,sha256=
+pydantic_ai/models/__init__.py,sha256=YPE0kbN0C71ljqH75m4Xsnlq5gkTxiIQt9efSGKHpf8,36146
+pydantic_ai/models/anthropic.py,sha256=tNHLk-sao7YEFNr8-bWU2rJS3a_yO8SHgrDacScvk_k,44772
 pydantic_ai/models/bedrock.py,sha256=M_3h_S3t2s7GOiP0YIHoJjwW3d2PLzNnmXTENomV9GM,33699
 pydantic_ai/models/cohere.py,sha256=wQ3UYiFMs5Oyeyz5sd6NyG3b94iCeYBptnJC8bEYOUA,13892
 pydantic_ai/models/fallback.py,sha256=fjQz7qRuxEwC6aFYkglBv-2Z39-6kZ931vs6o7PIti8,5016
 pydantic_ai/models/function.py,sha256=7-ej1m4f7c1TbvgB8sF02qlFD7Kf-EX-k_xN4RkbIEw,15880
 pydantic_ai/models/gemini.py,sha256=ZMO1mUX6GXPo0N2OHoi_nS9Lb-Rqf0YFsILoRcssaG4,40410
-pydantic_ai/models/google.py,sha256=
+pydantic_ai/models/google.py,sha256=rcYzRMELj98dgnw8YrBHM1R3HLVjCTkWgDXMSNQrxOA,42141
 pydantic_ai/models/groq.py,sha256=cB42E-EPX5O-lRRMsd3FTypVVuVVMDc2hV2c8H4N4rA,29665
 pydantic_ai/models/huggingface.py,sha256=iADyoCKYrNyjixr55rEpXW02F-sah4rLmqrThEcNNDw,21464
 pydantic_ai/models/instrumented.py,sha256=J8eVTutr3UP1r_wd5sM5c0BIdzkRqT-EGgd2NiF0ssQ,22319
 pydantic_ai/models/mcp_sampling.py,sha256=qY4y4nXbRpNp2QbkfjzWLvF_8KLZGXypz4cc0lYRHXU,3553
 pydantic_ai/models/mistral.py,sha256=fi57hADjYxZw8wEpAcNI6mqY32VG9hHK9GGRQ-9vlZg,33905
-pydantic_ai/models/openai.py,sha256=
+pydantic_ai/models/openai.py,sha256=wQJDGVAPzN5GNzny4ZN0CrnnrPIMxUOXQYfAtK0u7z4,108980
 pydantic_ai/models/test.py,sha256=5ER66nwZG7Iwm-KkzPo4vwNd3rulzgkpgysu4YcT1W4,20568
 pydantic_ai/models/wrapper.py,sha256=nwh8Gea59blbr1JDKlUnkYICuI9TUubC4qP7iZRRW28,2440
 pydantic_ai/profiles/__init__.py,sha256=UHknN-CYsQexUaxfsgz_J_uSZ9QwistLSuAErQkvbcM,3385
@@ -131,8 +131,8 @@ pydantic_ai/toolsets/prefixed.py,sha256=0KwcDkW8OM36ZUsOLVP5h-Nj2tPq78L3_E2c-1Fb
 pydantic_ai/toolsets/prepared.py,sha256=Zjfz6S8In6PBVxoKFN9sKPN984zO6t0awB7Lnq5KODw,1431
 pydantic_ai/toolsets/renamed.py,sha256=JuLHpi-hYPiSPlaTpN8WiXLiGsywYK0axi2lW2Qs75k,1637
 pydantic_ai/toolsets/wrapper.py,sha256=KRzF1p8dncHbva8CE6Ud-IC5E_aygIHlwH5atXK55k4,1673
-pydantic_ai_slim-1.3.0.dist-info/METADATA,sha256=
-pydantic_ai_slim-1.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydantic_ai_slim-1.3.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
-pydantic_ai_slim-1.3.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
-pydantic_ai_slim-1.3.0.dist-info/RECORD,,
+pydantic_ai_slim-1.4.0.dist-info/METADATA,sha256=KDC3HgFLp5M-yucTuTNbGO9BJe4-YfjRlvjCQEGptAk,4703
+pydantic_ai_slim-1.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_ai_slim-1.4.0.dist-info/entry_points.txt,sha256=kbKxe2VtDCYS06hsI7P3uZGxcVC08-FPt1rxeiMpIps,50
+pydantic_ai_slim-1.4.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_ai_slim-1.4.0.dist-info/RECORD,,
{pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/WHEEL
File without changes
{pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/entry_points.txt
File without changes
{pydantic_ai_slim-1.3.0.dist-info → pydantic_ai_slim-1.4.0.dist-info}/licenses/LICENSE
File without changes