pydantic-ai-slim 1.0.6__tar.gz → 1.0.8__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to their public registries. It is provided for informational purposes only.

Potentially problematic release: this version of pydantic-ai-slim has been flagged as potentially problematic.

Files changed (127)
  1. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/PKG-INFO +3 -3
  2. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_agent_graph.py +229 -134
  3. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/ag_ui.py +51 -40
  4. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/agent/__init__.py +35 -45
  5. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/agent/abstract.py +7 -7
  6. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/agent/wrapper.py +0 -1
  7. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/dbos/_agent.py +14 -10
  8. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/dbos/_mcp_server.py +4 -2
  9. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/temporal/_agent.py +0 -1
  10. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/temporal/_logfire.py +15 -3
  11. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/temporal/_toolset.py +17 -12
  12. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/mcp.py +5 -0
  13. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/__init__.py +4 -6
  14. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/result.py +3 -5
  15. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/run.py +0 -2
  16. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/tools.py +11 -0
  17. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/function.py +50 -9
  18. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/usage.py +2 -2
  19. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/.gitignore +0 -0
  20. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/LICENSE +0 -0
  21. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/README.md +0 -0
  22. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/__init__.py +0 -0
  23. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/__main__.py +0 -0
  24. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_a2a.py +0 -0
  25. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_cli.py +0 -0
  26. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_function_schema.py +0 -0
  27. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_griffe.py +0 -0
  28. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_mcp.py +0 -0
  29. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_otel_messages.py +0 -0
  30. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_output.py +0 -0
  31. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_parts_manager.py +0 -0
  32. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_run_context.py +0 -0
  33. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_system_prompt.py +0 -0
  34. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_thinking_part.py +0 -0
  35. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_tool_manager.py +0 -0
  36. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_utils.py +0 -0
  37. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/builtin_tools.py +0 -0
  38. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/common_tools/__init__.py +0 -0
  39. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  40. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/common_tools/tavily.py +0 -0
  41. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/direct.py +0 -0
  42. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/__init__.py +0 -0
  43. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  44. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  45. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  46. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  47. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  48. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  49. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
  50. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  51. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/exceptions.py +0 -0
  52. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/ext/__init__.py +0 -0
  53. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/ext/aci.py +0 -0
  54. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/ext/langchain.py +0 -0
  55. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/format_prompt.py +0 -0
  56. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/messages.py +0 -0
  57. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/anthropic.py +0 -0
  58. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/bedrock.py +0 -0
  59. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/cohere.py +0 -0
  60. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/fallback.py +0 -0
  61. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/function.py +0 -0
  62. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/gemini.py +0 -0
  63. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/google.py +0 -0
  64. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/groq.py +0 -0
  65. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/huggingface.py +0 -0
  66. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/instrumented.py +0 -0
  67. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/mcp_sampling.py +0 -0
  68. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/mistral.py +0 -0
  69. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/openai.py +0 -0
  70. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/test.py +0 -0
  71. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/models/wrapper.py +0 -0
  72. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/output.py +0 -0
  73. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/__init__.py +0 -0
  74. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/_json_schema.py +0 -0
  75. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/amazon.py +0 -0
  76. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/anthropic.py +0 -0
  77. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/cohere.py +0 -0
  78. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/deepseek.py +0 -0
  79. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/google.py +0 -0
  80. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/grok.py +0 -0
  81. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/groq.py +0 -0
  82. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/harmony.py +0 -0
  83. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/meta.py +0 -0
  84. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/mistral.py +0 -0
  85. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/moonshotai.py +0 -0
  86. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/openai.py +0 -0
  87. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/profiles/qwen.py +0 -0
  88. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/__init__.py +0 -0
  89. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/anthropic.py +0 -0
  90. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/azure.py +0 -0
  91. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/bedrock.py +0 -0
  92. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/cerebras.py +0 -0
  93. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/cohere.py +0 -0
  94. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/deepseek.py +0 -0
  95. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/fireworks.py +0 -0
  96. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/gateway.py +0 -0
  97. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/github.py +0 -0
  98. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/google.py +0 -0
  99. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/google_gla.py +0 -0
  100. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/google_vertex.py +0 -0
  101. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/grok.py +0 -0
  102. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/groq.py +0 -0
  103. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/heroku.py +0 -0
  104. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/huggingface.py +0 -0
  105. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/litellm.py +0 -0
  106. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/mistral.py +0 -0
  107. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/moonshotai.py +0 -0
  108. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/ollama.py +0 -0
  109. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/openai.py +0 -0
  110. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/openrouter.py +0 -0
  111. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/together.py +0 -0
  112. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/providers/vercel.py +0 -0
  113. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/py.typed +0 -0
  114. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/retries.py +0 -0
  115. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/settings.py +0 -0
  116. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/__init__.py +0 -0
  117. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/_dynamic.py +0 -0
  118. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/abstract.py +0 -0
  119. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/approval_required.py +0 -0
  120. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/combined.py +0 -0
  121. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/external.py +0 -0
  122. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/filtered.py +0 -0
  123. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/prefixed.py +0 -0
  124. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/prepared.py +0 -0
  125. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/renamed.py +0 -0
  126. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/toolsets/wrapper.py +0 -0
  127. {pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pyproject.toml +0 -0
{pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.6
+Version: 1.0.8
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.6
+Requires-Dist: pydantic-graph==1.0.8
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.13.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.6; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.8; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-1.0.6 → pydantic_ai_slim-1.0.8}/pydantic_ai/_agent_graph.py

@@ -8,7 +8,8 @@ from collections import defaultdict, deque
 from collections.abc import AsyncIterator, Awaitable, Callable, Iterator, Sequence
 from contextlib import asynccontextmanager, contextmanager
 from contextvars import ContextVar
-from dataclasses import field
+from copy import deepcopy
+from dataclasses import field, replace
 from typing import TYPE_CHECKING, Any, Generic, Literal, TypeGuard, cast
 
 from opentelemetry.trace import Tracer
@@ -16,7 +17,7 @@ from typing_extensions import TypeVar, assert_never
 
 from pydantic_ai._function_schema import _takes_ctx as is_takes_ctx  # type: ignore
 from pydantic_ai._tool_manager import ToolManager
-from pydantic_ai._utils import is_async_callable, run_in_executor
+from pydantic_ai._utils import dataclasses_no_defaults_repr, get_union_args, is_async_callable, run_in_executor
 from pydantic_ai.builtin_tools import AbstractBuiltinTool
 from pydantic_graph import BaseNode, Graph, GraphRunContext
 from pydantic_graph.nodes import End, NodeRunEndT
@@ -26,7 +27,9 @@ from .exceptions import ToolRetryError
 from .output import OutputDataT, OutputSpec
 from .settings import ModelSettings
 from .tools import (
+    DeferredToolCallResult,
     DeferredToolResult,
+    DeferredToolResults,
     RunContext,
     ToolApproved,
     ToolDefinition,
@@ -123,7 +126,6 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
 
     builtin_tools: list[AbstractBuiltinTool] = dataclasses.field(repr=False)
     tool_manager: ToolManager[DepsT]
-    tool_call_results: dict[str, DeferredToolResult] | None
 
     tracer: Tracer
     instrumentation_settings: InstrumentationSettings | None
@@ -160,14 +162,18 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
 
     _: dataclasses.KW_ONLY
 
-    instructions: str | None
-    instructions_functions: list[_system_prompt.SystemPromptRunner[DepsT]]
+    deferred_tool_results: DeferredToolResults | None = None
 
-    system_prompts: tuple[str, ...]
-    system_prompt_functions: list[_system_prompt.SystemPromptRunner[DepsT]]
-    system_prompt_dynamic_functions: dict[str, _system_prompt.SystemPromptRunner[DepsT]]
+    instructions: str | None = None
+    instructions_functions: list[_system_prompt.SystemPromptRunner[DepsT]] = dataclasses.field(default_factory=list)
 
-    async def run(
+    system_prompts: tuple[str, ...] = dataclasses.field(default_factory=tuple)
+    system_prompt_functions: list[_system_prompt.SystemPromptRunner[DepsT]] = dataclasses.field(default_factory=list)
+    system_prompt_dynamic_functions: dict[str, _system_prompt.SystemPromptRunner[DepsT]] = dataclasses.field(
+        default_factory=dict
+    )
+
+    async def run(  # noqa: C901
         self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
     ) -> ModelRequestNode[DepsT, NodeRunEndT] | CallToolsNode[DepsT, NodeRunEndT]:
         try:
@@ -181,119 +187,126 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
                 messages = ctx_messages.messages
                 ctx_messages.used = True
 
-        # Add message history to the `capture_run_messages` list, which will be empty at this point
-        messages.extend(ctx.state.message_history)
+        # Replace the `capture_run_messages` list with the message history
+        messages[:] = _clean_message_history(ctx.state.message_history)
         # Use the `capture_run_messages` list as the message history so that new messages are added to it
         ctx.state.message_history = messages
+        ctx.deps.new_message_index = len(messages)
 
-        run_context = build_run_context(ctx)
-
-        parts: list[_messages.ModelRequestPart] = []
-        if messages:
-            # Reevaluate any dynamic system prompt parts
-            await self._reevaluate_dynamic_prompts(messages, run_context)
-        else:
-            parts.extend(await self._sys_parts(run_context))
-
-        if (tool_call_results := ctx.deps.tool_call_results) is not None:
-            if messages and (last_message := messages[-1]) and isinstance(last_message, _messages.ModelRequest):
-                # If tool call results were provided, that means the previous run ended on deferred tool calls.
-                # That run would typically have ended on a `ModelResponse`, but if it had a mix of deferred tool calls and ones that could already be executed,
-                # a `ModelRequest` would already have been added to the history with the preliminary results, even if it wouldn't have been sent to the model yet.
-                # So now that we have all of the deferred results, we roll back to the last `ModelResponse` and store the contents of the `ModelRequest` on `deferred_tool_results` to be handled by `CallToolsNode`.
-                ctx.deps.tool_call_results = self._update_tool_call_results_from_model_request(
-                    tool_call_results, last_message
-                )
-                messages.pop()
+        if self.deferred_tool_results is not None:
+            return await self._handle_deferred_tool_results(self.deferred_tool_results, messages, ctx)
 
-            if not messages:
-                raise exceptions.UserError('Tool call results were provided, but the message history is empty.')
+        next_message: _messages.ModelRequest | None = None
 
         if messages and (last_message := messages[-1]):
             if isinstance(last_message, _messages.ModelRequest) and self.user_prompt is None:
                 # Drop last message from history and reuse its parts
                 messages.pop()
-                parts.extend(last_message.parts)
+                next_message = _messages.ModelRequest(parts=last_message.parts)
+
+                # Extract `UserPromptPart` content from the popped message and add to `ctx.deps.prompt`
+                user_prompt_parts = [part for part in last_message.parts if isinstance(part, _messages.UserPromptPart)]
+                if user_prompt_parts:
+                    if len(user_prompt_parts) == 1:
+                        ctx.deps.prompt = user_prompt_parts[0].content
+                    else:
+                        combined_content: list[_messages.UserContent] = []
+                        for part in user_prompt_parts:
+                            if isinstance(part.content, str):
+                                combined_content.append(part.content)
+                            else:
+                                combined_content.extend(part.content)
+                        ctx.deps.prompt = combined_content
             elif isinstance(last_message, _messages.ModelResponse):
-                call_tools_node = await self._handle_message_history_model_response(ctx, last_message)
-                if call_tools_node is not None:
-                    return call_tools_node
+                if self.user_prompt is None:
+                    # Skip ModelRequestNode and go directly to CallToolsNode
+                    return CallToolsNode[DepsT, NodeRunEndT](last_message)
+                elif any(isinstance(part, _messages.ToolCallPart) for part in last_message.parts):
+                    raise exceptions.UserError(
+                        'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
+                    )
 
-        if self.user_prompt is not None:
-            parts.append(_messages.UserPromptPart(self.user_prompt))
+        # Build the run context after `ctx.deps.prompt` has been updated
+        run_context = build_run_context(ctx)
 
-        instructions = await ctx.deps.get_instructions(run_context)
-        next_message = _messages.ModelRequest(parts, instructions=instructions)
+        parts: list[_messages.ModelRequestPart] = []
+        if messages:
+            await self._reevaluate_dynamic_prompts(messages, run_context)
 
-        return ModelRequestNode[DepsT, NodeRunEndT](request=next_message)
+        if next_message:
+            await self._reevaluate_dynamic_prompts([next_message], run_context)
+        else:
+            parts: list[_messages.ModelRequestPart] = []
+            if not messages:
+                parts.extend(await self._sys_parts(run_context))
 
-    async def _handle_message_history_model_response(
-        self,
-        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
-        message: _messages.ModelResponse,
-    ) -> CallToolsNode[DepsT, NodeRunEndT] | None:
-        unprocessed_tool_calls = any(isinstance(part, _messages.ToolCallPart) for part in message.parts)
-        if unprocessed_tool_calls:
             if self.user_prompt is not None:
-                raise exceptions.UserError(
-                    'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
-                )
-        else:
-            if ctx.deps.tool_call_results is not None:
-                raise exceptions.UserError(
-                    'Tool call results were provided, but the message history does not contain any unprocessed tool calls.'
-                )
+                parts.append(_messages.UserPromptPart(self.user_prompt))
 
-        if unprocessed_tool_calls or self.user_prompt is None:
-            # `CallToolsNode` requires the tool manager to be prepared for the run step
-            # This will raise errors for any tool name conflicts
-            run_context = build_run_context(ctx)
-            ctx.deps.tool_manager = await ctx.deps.tool_manager.for_run_step(run_context)
-
-            # Skip ModelRequestNode and go directly to CallToolsNode
-            return CallToolsNode[DepsT, NodeRunEndT](model_response=message)
-
-    def _update_tool_call_results_from_model_request(
-        self, tool_call_results: dict[str, DeferredToolResult], message: _messages.ModelRequest
-    ) -> dict[str, DeferredToolResult]:
-        last_tool_return: _messages.ToolReturn | None = None
-        user_content: list[str | _messages.UserContent] = []
-        for part in message.parts:
-            if isinstance(part, _messages.ToolReturnPart):
-                if part.tool_call_id in tool_call_results:
-                    raise exceptions.UserError(
-                        f'Tool call {part.tool_call_id!r} was already executed and its result cannot be overridden.'
-                    )
+            next_message = _messages.ModelRequest(parts=parts)
 
-                last_tool_return = _messages.ToolReturn(return_value=part.content, metadata=part.metadata)
-                tool_call_results[part.tool_call_id] = last_tool_return
-            elif isinstance(part, _messages.RetryPromptPart):
-                if part.tool_call_id in tool_call_results:
-                    raise exceptions.UserError(
-                        f'Tool call {part.tool_call_id!r} was already executed and its result cannot be overridden.'
-                    )
+        next_message.instructions = await ctx.deps.get_instructions(run_context)
 
-                tool_call_results[part.tool_call_id] = part
-            elif isinstance(part, _messages.UserPromptPart):
-                # Tools can return user parts via `ToolReturn.content` or by returning multi-modal content.
-                # These go together with a specific `ToolReturnPart`, but we don't have a way to know which,
-                # so (below) we just add them to the last one, matching the tool-results-before-user-parts order of the request.
-                if isinstance(part.content, str):
-                    user_content.append(part.content)
-                else:
-                    user_content.extend(part.content)
-            else:
-                raise exceptions.UserError(f'Unexpected message part type: {type(part)}')  # pragma: no cover
+        return ModelRequestNode[DepsT, NodeRunEndT](request=next_message)
 
-        if user_content:
-            if last_tool_return is None:
-                raise exceptions.UserError(
-                    'Tool call results were provided, but the last message in the history was a `ModelRequest` with user parts not tied to preliminary tool results.'
-                )
-            assert last_tool_return is not None
-            last_tool_return.content = user_content
+    async def _handle_deferred_tool_results(  # noqa: C901
+        self,
+        deferred_tool_results: DeferredToolResults,
+        messages: list[_messages.ModelMessage],
+        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
+    ) -> CallToolsNode[DepsT, NodeRunEndT]:
+        if not messages:
+            raise exceptions.UserError('Tool call results were provided, but the message history is empty.')
+
+        last_model_request: _messages.ModelRequest | None = None
+        last_model_response: _messages.ModelResponse | None = None
+        for message in reversed(messages):
+            if isinstance(message, _messages.ModelRequest):
+                last_model_request = message
+            elif isinstance(message, _messages.ModelResponse):  # pragma: no branch
+                last_model_response = message
+                break
+
+        if not last_model_response:
+            raise exceptions.UserError(
+                'Tool call results were provided, but the message history does not contain a `ModelResponse`.'
+            )
+        if not any(isinstance(part, _messages.ToolCallPart) for part in last_model_response.parts):
+            raise exceptions.UserError(
+                'Tool call results were provided, but the message history does not contain any unprocessed tool calls.'
+            )
+        if self.user_prompt is not None:
+            raise exceptions.UserError(
+                'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
+            )
+
+        tool_call_results: dict[str, DeferredToolResult | Literal['skip']] | None = None
+        tool_call_results = {}
+        for tool_call_id, approval in deferred_tool_results.approvals.items():
+            if approval is True:
+                approval = ToolApproved()
+            elif approval is False:
+                approval = ToolDenied()
+            tool_call_results[tool_call_id] = approval
+
+        if calls := deferred_tool_results.calls:
+            call_result_types = get_union_args(DeferredToolCallResult)
+            for tool_call_id, result in calls.items():
+                if not isinstance(result, call_result_types):
+                    result = _messages.ToolReturn(result)
+                tool_call_results[tool_call_id] = result
+
+        if last_model_request:
+            for part in last_model_request.parts:
+                if isinstance(part, _messages.ToolReturnPart | _messages.RetryPromptPart):
+                    if part.tool_call_id in tool_call_results:
+                        raise exceptions.UserError(
+                            f'Tool call {part.tool_call_id!r} was already executed and its result cannot be overridden.'
+                        )
+                    tool_call_results[part.tool_call_id] = 'skip'
 
-        return tool_call_results
+        # Skip ModelRequestNode and go directly to CallToolsNode
+        return CallToolsNode[DepsT, NodeRunEndT](last_model_response, tool_call_results=tool_call_results)
 
     async def _reevaluate_dynamic_prompts(
         self, messages: list[_messages.ModelMessage], run_context: RunContext[DepsT]
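Note: the new `_handle_deferred_tool_results` above normalizes caller-supplied results into one `tool_call_results` map keyed by `tool_call_id`: plain booleans in `approvals` become `ToolApproved()`/`ToolDenied()`, and plain values in `calls` are wrapped in `ToolReturn`. A minimal caller-side sketch of resuming a run with such results — the model name and tool call ids are illustrative, and it assumes `run_sync` accepts `deferred_tool_results` as in this release's `Agent` API:

from pydantic_ai import Agent
from pydantic_ai.messages import ModelMessage
from pydantic_ai.tools import DeferredToolResults

agent = Agent('openai:gpt-4o')  # assumed setup; any configured model works


def resume(history: list[ModelMessage]) -> str:
    # `history` must end with the ModelResponse whose tool calls were deferred;
    # the ids below are hypothetical and must match that response's tool_call_ids.
    results = DeferredToolResults(
        approvals={'call_1': True, 'call_2': False},  # bools become ToolApproved() / ToolDenied()
        calls={'call_3': 42},  # plain values are wrapped in ToolReturn(42)
    )
    run = agent.run_sync(message_history=history, deferred_tool_results=results)
    return run.output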
@@ -330,6 +343,8 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
                 messages.append(_messages.SystemPromptPart(prompt))
         return messages
 
+    __repr__ = dataclasses_no_defaults_repr
+
 
 async def _prepare_request_parameters(
     ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
@@ -440,7 +455,19 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
        # This will raise errors for any tool name conflicts
         ctx.deps.tool_manager = await ctx.deps.tool_manager.for_run_step(run_context)
 
-        message_history = await _process_message_history(ctx.state, ctx.deps.history_processors, run_context)
+        original_history = ctx.state.message_history[:]
+        message_history = await _process_message_history(original_history, ctx.deps.history_processors, run_context)
+        # Never merge the new `ModelRequest` with the one preceding it, to keep `new_messages()` from accidentally including part of the existing message history
+        message_history = [*_clean_message_history(message_history[:-1]), message_history[-1]]
+        # `ctx.state.message_history` is the same list used by `capture_run_messages`, so we should replace its contents, not the reference
+        ctx.state.message_history[:] = message_history
+        # Update the new message index to ensure `result.new_messages()` returns the correct messages
+        ctx.deps.new_message_index -= len(original_history) - len(message_history)
+
+        # Do one more cleaning pass to merge possible consecutive trailing `ModelRequest`s into one, with tool call parts before user parts,
+        # but don't store it in the message history on state.
+        # See `tests/test_tools.py::test_parallel_tool_return_with_deferred` for an example where this is necessary
+        message_history = _clean_message_history(message_history)
 
         model_request_parameters = await _prepare_request_parameters(ctx)
         model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
@@ -449,7 +476,7 @@
         usage = ctx.state.usage
         if ctx.deps.usage_limits.count_tokens_before_request:
             # Copy to avoid modifying the original usage object with the counted usage
-            usage = dataclasses.replace(usage)
+            usage = deepcopy(usage)
 
             counted_usage = await ctx.deps.model.count_tokens(message_history, model_settings, model_request_parameters)
             usage.incr(counted_usage)
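The switch from `dataclasses.replace` to `deepcopy` matters because `replace` is a shallow copy: any mutable field is shared with the original, so calling `incr` on the copy would also mutate the run's real usage object. A self-contained sketch with a stand-in dataclass (the real usage type is not shown in this diff, but it likewise carries mutable state):

import dataclasses
from copy import deepcopy


@dataclasses.dataclass
class FakeUsage:  # stand-in for the run usage object, which also has a mutable details dict
    requests: int = 0
    details: dict[str, int] = dataclasses.field(default_factory=dict)


original = FakeUsage(requests=1, details={'reasoning_tokens': 10})

shallow = dataclasses.replace(original)  # new object, but `details` is the same dict
shallow.details['reasoning_tokens'] += 5
assert original.details['reasoning_tokens'] == 15  # the original was mutated

deep = deepcopy(original)  # fully independent copy
deep.details['reasoning_tokens'] += 100
assert original.details['reasoning_tokens'] == 15  # unchanged this time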
@@ -476,12 +503,15 @@
 
         return self._result
 
+    __repr__ = dataclasses_no_defaults_repr
+
 
 @dataclasses.dataclass
 class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
     """The node that processes a model response, and decides whether to end the run or make a new request."""
 
     model_response: _messages.ModelResponse
+    tool_call_results: dict[str, DeferredToolResult | Literal['skip']] | None = None
     _events_iterator: AsyncIterator[_messages.HandleResponseEvent] | None = field(default=None, init=False, repr=False)
     _next_node: ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]] | None = field(
@@ -582,11 +612,20 @@
     ) -> AsyncIterator[_messages.HandleResponseEvent]:
         run_context = build_run_context(ctx)
 
+        # This will raise errors for any tool name conflicts
+        ctx.deps.tool_manager = await ctx.deps.tool_manager.for_run_step(run_context)
+
         output_parts: list[_messages.ModelRequestPart] = []
         output_final_result: deque[result.FinalResult[NodeRunEndT]] = deque(maxlen=1)
 
-        async for event in process_function_tools(
-            ctx.deps.tool_manager, tool_calls, None, ctx, output_parts, output_final_result
+        async for event in process_tool_calls(
+            tool_manager=ctx.deps.tool_manager,
+            tool_calls=tool_calls,
+            tool_call_results=self.tool_call_results,
+            final_result=None,
+            ctx=ctx,
+            output_parts=output_parts,
+            output_final_result=output_final_result,
         ):
             yield event
 
@@ -639,6 +678,8 @@
         else:
             return self._handle_final_result(ctx, result.FinalResult(result_data), [])
 
+    __repr__ = dataclasses_no_defaults_repr
+
 
 def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, Any]]) -> RunContext[DepsT]:
     """Build a `RunContext` object from the current agent graph run context."""
@@ -652,13 +693,14 @@ def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT
         trace_include_content=ctx.deps.instrumentation_settings is not None
         and ctx.deps.instrumentation_settings.include_content,
         run_step=ctx.state.run_step,
-        tool_call_approved=ctx.state.run_step == 0 and ctx.deps.tool_call_results is not None,
+        tool_call_approved=ctx.state.run_step == 0,
     )
 
 
-async def process_function_tools(  # noqa: C901
+async def process_tool_calls(  # noqa: C901
     tool_manager: ToolManager[DepsT],
     tool_calls: list[_messages.ToolCallPart],
+    tool_call_results: dict[str, DeferredToolResult | Literal['skip']] | None,
     final_result: result.FinalResult[NodeRunEndT] | None,
     ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
     output_parts: list[_messages.ModelRequestPart],
@@ -739,14 +781,13 @@ async def process_function_tools(  # noqa: C901
             ctx.state.increment_retries(ctx.deps.max_result_retries)
             calls_to_run.extend(tool_calls_by_kind['unknown'])
 
-    deferred_tool_results: dict[str, DeferredToolResult] = {}
-    if build_run_context(ctx).tool_call_approved and ctx.deps.tool_call_results is not None:
-        deferred_tool_results = ctx.deps.tool_call_results
+    calls_to_run_results: dict[str, DeferredToolResult] = {}
+    if tool_call_results is not None:
         # Deferred tool calls are "run" as well, by reading their value from the tool call results
         calls_to_run.extend(tool_calls_by_kind['external'])
         calls_to_run.extend(tool_calls_by_kind['unapproved'])
 
-        result_tool_call_ids = set(deferred_tool_results.keys())
+        result_tool_call_ids = set(tool_call_results.keys())
         tool_call_ids_to_run = {call.tool_call_id for call in calls_to_run}
         if tool_call_ids_to_run != result_tool_call_ids:
             raise exceptions.UserError(
@@ -754,24 +795,29 @@ async def process_function_tools(  # noqa: C901
                 f'Expected: {tool_call_ids_to_run}, got: {result_tool_call_ids}'
             )
 
+        # Filter out calls that were already executed before and should now be skipped
+        calls_to_run_results = {call_id: result for call_id, result in tool_call_results.items() if result != 'skip'}
+        calls_to_run = [call for call in calls_to_run if call.tool_call_id in calls_to_run_results]
+
     deferred_calls: dict[Literal['external', 'unapproved'], list[_messages.ToolCallPart]] = defaultdict(list)
 
     if calls_to_run:
         async for event in _call_tools(
-            tool_manager,
-            calls_to_run,
-            deferred_tool_results,
-            ctx.deps.tracer,
-            ctx.deps.usage_limits,
-            output_parts,
-            deferred_calls,
+            tool_manager=tool_manager,
+            tool_calls=calls_to_run,
+            tool_call_results=calls_to_run_results,
+            tracer=ctx.deps.tracer,
+            usage_limits=ctx.deps.usage_limits,
+            output_parts=output_parts,
+            output_deferred_calls=deferred_calls,
         ):
             yield event
 
     # Finally, we handle deferred tool calls (unless they were already included in the run because results were provided)
-    if not deferred_tool_results:
+    if tool_call_results is None:
+        calls = [*tool_calls_by_kind['external'], *tool_calls_by_kind['unapproved']]
         if final_result:
-            for call in [*tool_calls_by_kind['external'], *tool_calls_by_kind['unapproved']]:
+            for call in calls:
                 output_parts.append(
                     _messages.ToolReturnPart(
                         tool_name=call.tool_name,
@@ -779,13 +825,11 @@ async def process_function_tools(  # noqa: C901
                         tool_call_id=call.tool_call_id,
                     )
                 )
-        else:
-            for call in tool_calls_by_kind['external']:
-                deferred_calls['external'].append(call)
-                yield _messages.FunctionToolCallEvent(call)
+        elif calls:
+            deferred_calls['external'].extend(tool_calls_by_kind['external'])
+            deferred_calls['unapproved'].extend(tool_calls_by_kind['unapproved'])
 
-            for call in tool_calls_by_kind['unapproved']:
-                deferred_calls['unapproved'].append(call)
+            for call in calls:
                 yield _messages.FunctionToolCallEvent(call)
 
     if not final_result and deferred_calls:
@@ -807,7 +851,7 @@
 async def _call_tools(
     tool_manager: ToolManager[DepsT],
     tool_calls: list[_messages.ToolCallPart],
-    deferred_tool_results: dict[str, DeferredToolResult],
+    tool_call_results: dict[str, DeferredToolResult],
     tracer: Tracer,
     usage_limits: _usage.UsageLimits | None,
     output_parts: list[_messages.ModelRequestPart],
@@ -853,7 +897,7 @@
     if tool_manager.should_call_sequentially(tool_calls):
         for index, call in enumerate(tool_calls):
             if event := await handle_call_or_result(
-                _call_tool(tool_manager, call, deferred_tool_results.get(call.tool_call_id), usage_limits),
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id), usage_limits),
                 index,
             ):
                 yield event
@@ -861,7 +905,7 @@
     else:
         tasks = [
             asyncio.create_task(
-                _call_tool(tool_manager, call, deferred_tool_results.get(call.tool_call_id), usage_limits),
+                _call_tool(tool_manager, call, tool_call_results.get(call.tool_call_id), usage_limits),
                 name=call.tool_name,
             )
             for call in tool_calls
@@ -1053,12 +1097,11 @@
 
 
 async def _process_message_history(
-    state: GraphAgentState,
+    messages: list[_messages.ModelMessage],
     processors: Sequence[HistoryProcessor[DepsT]],
     run_context: RunContext[DepsT],
 ) -> list[_messages.ModelMessage]:
     """Process message history through a sequence of processors."""
-    messages = state.message_history
     for processor in processors:
         takes_ctx = is_takes_ctx(processor)
 
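`_process_message_history` now receives the message list directly instead of reading and mutating `GraphAgentState`, and the next hunk adds validation of what processors return. A hedged sketch of a processor that stays within the new rules (`keep_recent` is an illustrative name; such functions are passed to `Agent(..., history_processors=[...])`):

from pydantic_ai.messages import ModelMessage, ModelRequest


def keep_recent(messages: list[ModelMessage]) -> list[ModelMessage]:
    # Trim to the last 10 messages. Because slicing keeps the tail, a history that
    # ended with a ModelRequest still does, satisfying the checks added below:
    # the processed history may not be empty and must end with a ModelRequest.
    trimmed = messages[-10:]
    assert trimmed and isinstance(trimmed[-1], ModelRequest)
    return trimmed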
@@ -1076,6 +1119,58 @@
         sync_processor = cast(_HistoryProcessorSync, processor)
         messages = await run_in_executor(sync_processor, messages)
 
-    # Replaces the message history in the state with the processed messages
-    state.message_history = messages
+    if len(messages) == 0:
+        raise exceptions.UserError('Processed history cannot be empty.')
+
+    if not isinstance(messages[-1], _messages.ModelRequest):
+        raise exceptions.UserError('Processed history must end with a `ModelRequest`.')
+
     return messages
+
+
+def _clean_message_history(messages: list[_messages.ModelMessage]) -> list[_messages.ModelMessage]:
+    """Clean the message history by merging consecutive messages of the same type."""
+    clean_messages: list[_messages.ModelMessage] = []
+    for message in messages:
+        last_message = clean_messages[-1] if len(clean_messages) > 0 else None
+
+        if isinstance(message, _messages.ModelRequest):
+            if (
+                last_message
+                and isinstance(last_message, _messages.ModelRequest)
+                # Requests can only be merged if they have the same instructions
+                and (
+                    not last_message.instructions
+                    or not message.instructions
+                    or last_message.instructions == message.instructions
+                )
+            ):
+                parts = [*last_message.parts, *message.parts]
+                parts.sort(
+                    # Tool return parts always need to be at the start
+                    key=lambda x: 0 if isinstance(x, _messages.ToolReturnPart | _messages.RetryPromptPart) else 1
+                )
+                merged_message = _messages.ModelRequest(
+                    parts=parts,
+                    instructions=last_message.instructions or message.instructions,
+                )
+                clean_messages[-1] = merged_message
+            else:
+                clean_messages.append(message)
+        elif isinstance(message, _messages.ModelResponse):  # pragma: no branch
+            if (
+                last_message
+                and isinstance(last_message, _messages.ModelResponse)
+                # Responses can only be merged if they didn't really come from an API
+                and last_message.provider_response_id is None
+                and last_message.provider_name is None
+                and last_message.model_name is None
+                and message.provider_response_id is None
+                and message.provider_name is None
+                and message.model_name is None
+            ):
+                merged_message = replace(last_message, parts=[*last_message.parts, *message.parts])
+                clean_messages[-1] = merged_message
+            else:
+                clean_messages.append(message)
+    return clean_messages
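To make the merge rule concrete, a small sketch of the helper's effect on two consecutive requests (`_clean_message_history` is module-private, so this is for illustration only; part types come from `pydantic_ai.messages`):

from pydantic_ai import messages as _messages

history: list[_messages.ModelMessage] = [
    _messages.ModelRequest(parts=[_messages.UserPromptPart('also use the tool result')]),
    _messages.ModelRequest(parts=[_messages.ToolReturnPart(tool_name='lookup', content='ok', tool_call_id='call_1')]),
]
merged = _clean_message_history(history)
assert len(merged) == 1
# After the sort, the tool return part precedes the user prompt part
assert isinstance(merged[0].parts[0], _messages.ToolReturnPart)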