agentpool 2.2.3__py3-none-any.whl → 2.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250)
  1. acp/__init__.py +0 -4
  2. acp/acp_requests.py +20 -77
  3. acp/agent/connection.py +8 -0
  4. acp/agent/implementations/debug_server/debug_server.py +6 -2
  5. acp/agent/protocol.py +6 -0
  6. acp/client/connection.py +38 -29
  7. acp/client/implementations/default_client.py +3 -2
  8. acp/client/implementations/headless_client.py +2 -2
  9. acp/connection.py +2 -2
  10. acp/notifications.py +18 -49
  11. acp/schema/__init__.py +2 -0
  12. acp/schema/agent_responses.py +21 -0
  13. acp/schema/client_requests.py +3 -3
  14. acp/schema/session_state.py +63 -29
  15. acp/task/supervisor.py +2 -2
  16. acp/utils.py +2 -2
  17. agentpool/__init__.py +2 -0
  18. agentpool/agents/acp_agent/acp_agent.py +278 -263
  19. agentpool/agents/acp_agent/acp_converters.py +150 -17
  20. agentpool/agents/acp_agent/client_handler.py +35 -24
  21. agentpool/agents/acp_agent/session_state.py +14 -6
  22. agentpool/agents/agent.py +471 -643
  23. agentpool/agents/agui_agent/agui_agent.py +104 -107
  24. agentpool/agents/agui_agent/helpers.py +3 -4
  25. agentpool/agents/base_agent.py +485 -32
  26. agentpool/agents/claude_code_agent/FORKING.md +191 -0
  27. agentpool/agents/claude_code_agent/__init__.py +13 -1
  28. agentpool/agents/claude_code_agent/claude_code_agent.py +654 -334
  29. agentpool/agents/claude_code_agent/converters.py +4 -141
  30. agentpool/agents/claude_code_agent/models.py +77 -0
  31. agentpool/agents/claude_code_agent/static_info.py +100 -0
  32. agentpool/agents/claude_code_agent/usage.py +242 -0
  33. agentpool/agents/events/__init__.py +22 -0
  34. agentpool/agents/events/builtin_handlers.py +65 -0
  35. agentpool/agents/events/event_emitter.py +3 -0
  36. agentpool/agents/events/events.py +84 -3
  37. agentpool/agents/events/infer_info.py +145 -0
  38. agentpool/agents/events/processors.py +254 -0
  39. agentpool/agents/interactions.py +41 -6
  40. agentpool/agents/modes.py +13 -0
  41. agentpool/agents/slashed_agent.py +5 -4
  42. agentpool/agents/tool_wrapping.py +18 -6
  43. agentpool/common_types.py +35 -21
  44. agentpool/config_resources/acp_assistant.yml +2 -2
  45. agentpool/config_resources/agents.yml +3 -0
  46. agentpool/config_resources/agents_template.yml +1 -0
  47. agentpool/config_resources/claude_code_agent.yml +9 -8
  48. agentpool/config_resources/external_acp_agents.yml +2 -1
  49. agentpool/delegation/base_team.py +4 -30
  50. agentpool/delegation/pool.py +104 -265
  51. agentpool/delegation/team.py +57 -57
  52. agentpool/delegation/teamrun.py +50 -55
  53. agentpool/functional/run.py +10 -4
  54. agentpool/mcp_server/client.py +73 -38
  55. agentpool/mcp_server/conversions.py +54 -13
  56. agentpool/mcp_server/manager.py +9 -23
  57. agentpool/mcp_server/registries/official_registry_client.py +10 -1
  58. agentpool/mcp_server/tool_bridge.py +114 -79
  59. agentpool/messaging/connection_manager.py +11 -10
  60. agentpool/messaging/event_manager.py +5 -5
  61. agentpool/messaging/message_container.py +6 -30
  62. agentpool/messaging/message_history.py +87 -8
  63. agentpool/messaging/messagenode.py +52 -14
  64. agentpool/messaging/messages.py +2 -26
  65. agentpool/messaging/processing.py +10 -22
  66. agentpool/models/__init__.py +1 -1
  67. agentpool/models/acp_agents/base.py +6 -2
  68. agentpool/models/acp_agents/mcp_capable.py +124 -15
  69. agentpool/models/acp_agents/non_mcp.py +0 -23
  70. agentpool/models/agents.py +66 -66
  71. agentpool/models/agui_agents.py +1 -1
  72. agentpool/models/claude_code_agents.py +111 -17
  73. agentpool/models/file_parsing.py +0 -1
  74. agentpool/models/manifest.py +70 -50
  75. agentpool/prompts/conversion_manager.py +1 -1
  76. agentpool/prompts/prompts.py +5 -2
  77. agentpool/resource_providers/__init__.py +2 -0
  78. agentpool/resource_providers/aggregating.py +4 -2
  79. agentpool/resource_providers/base.py +13 -3
  80. agentpool/resource_providers/codemode/code_executor.py +72 -5
  81. agentpool/resource_providers/codemode/helpers.py +2 -2
  82. agentpool/resource_providers/codemode/provider.py +64 -12
  83. agentpool/resource_providers/codemode/remote_mcp_execution.py +2 -2
  84. agentpool/resource_providers/codemode/remote_provider.py +9 -12
  85. agentpool/resource_providers/filtering.py +3 -1
  86. agentpool/resource_providers/mcp_provider.py +66 -12
  87. agentpool/resource_providers/plan_provider.py +111 -18
  88. agentpool/resource_providers/pool.py +5 -3
  89. agentpool/resource_providers/resource_info.py +111 -0
  90. agentpool/resource_providers/static.py +2 -2
  91. agentpool/sessions/__init__.py +2 -0
  92. agentpool/sessions/manager.py +2 -3
  93. agentpool/sessions/models.py +9 -6
  94. agentpool/sessions/protocol.py +28 -0
  95. agentpool/sessions/session.py +11 -55
  96. agentpool/storage/manager.py +361 -54
  97. agentpool/talk/registry.py +4 -4
  98. agentpool/talk/talk.py +9 -10
  99. agentpool/testing.py +1 -1
  100. agentpool/tool_impls/__init__.py +6 -0
  101. agentpool/tool_impls/agent_cli/__init__.py +42 -0
  102. agentpool/tool_impls/agent_cli/tool.py +95 -0
  103. agentpool/tool_impls/bash/__init__.py +64 -0
  104. agentpool/tool_impls/bash/helpers.py +35 -0
  105. agentpool/tool_impls/bash/tool.py +171 -0
  106. agentpool/tool_impls/delete_path/__init__.py +70 -0
  107. agentpool/tool_impls/delete_path/tool.py +142 -0
  108. agentpool/tool_impls/download_file/__init__.py +80 -0
  109. agentpool/tool_impls/download_file/tool.py +183 -0
  110. agentpool/tool_impls/execute_code/__init__.py +55 -0
  111. agentpool/tool_impls/execute_code/tool.py +163 -0
  112. agentpool/tool_impls/grep/__init__.py +80 -0
  113. agentpool/tool_impls/grep/tool.py +200 -0
  114. agentpool/tool_impls/list_directory/__init__.py +73 -0
  115. agentpool/tool_impls/list_directory/tool.py +197 -0
  116. agentpool/tool_impls/question/__init__.py +42 -0
  117. agentpool/tool_impls/question/tool.py +127 -0
  118. agentpool/tool_impls/read/__init__.py +104 -0
  119. agentpool/tool_impls/read/tool.py +305 -0
  120. agentpool/tools/__init__.py +2 -1
  121. agentpool/tools/base.py +114 -34
  122. agentpool/tools/manager.py +57 -1
  123. agentpool/ui/base.py +2 -2
  124. agentpool/ui/mock_provider.py +2 -2
  125. agentpool/ui/stdlib_provider.py +2 -2
  126. agentpool/utils/streams.py +21 -96
  127. agentpool/vfs_registry.py +7 -2
  128. {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/METADATA +16 -22
  129. {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/RECORD +242 -195
  130. {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/WHEEL +1 -1
  131. agentpool_cli/__main__.py +20 -0
  132. agentpool_cli/create.py +1 -1
  133. agentpool_cli/serve_acp.py +59 -1
  134. agentpool_cli/serve_opencode.py +1 -1
  135. agentpool_cli/ui.py +557 -0
  136. agentpool_commands/__init__.py +12 -5
  137. agentpool_commands/agents.py +1 -1
  138. agentpool_commands/pool.py +260 -0
  139. agentpool_commands/session.py +1 -1
  140. agentpool_commands/text_sharing/__init__.py +119 -0
  141. agentpool_commands/text_sharing/base.py +123 -0
  142. agentpool_commands/text_sharing/github_gist.py +80 -0
  143. agentpool_commands/text_sharing/opencode.py +462 -0
  144. agentpool_commands/text_sharing/paste_rs.py +59 -0
  145. agentpool_commands/text_sharing/pastebin.py +116 -0
  146. agentpool_commands/text_sharing/shittycodingagent.py +112 -0
  147. agentpool_commands/utils.py +31 -32
  148. agentpool_config/__init__.py +30 -2
  149. agentpool_config/agentpool_tools.py +498 -0
  150. agentpool_config/converters.py +1 -1
  151. agentpool_config/event_handlers.py +42 -0
  152. agentpool_config/events.py +1 -1
  153. agentpool_config/forward_targets.py +1 -4
  154. agentpool_config/jinja.py +3 -3
  155. agentpool_config/mcp_server.py +1 -5
  156. agentpool_config/nodes.py +1 -1
  157. agentpool_config/observability.py +44 -0
  158. agentpool_config/session.py +0 -3
  159. agentpool_config/storage.py +38 -39
  160. agentpool_config/task.py +3 -3
  161. agentpool_config/tools.py +11 -28
  162. agentpool_config/toolsets.py +22 -90
  163. agentpool_server/a2a_server/agent_worker.py +307 -0
  164. agentpool_server/a2a_server/server.py +23 -18
  165. agentpool_server/acp_server/acp_agent.py +125 -56
  166. agentpool_server/acp_server/commands/acp_commands.py +46 -216
  167. agentpool_server/acp_server/commands/docs_commands/fetch_repo.py +8 -7
  168. agentpool_server/acp_server/event_converter.py +651 -0
  169. agentpool_server/acp_server/input_provider.py +53 -10
  170. agentpool_server/acp_server/server.py +1 -11
  171. agentpool_server/acp_server/session.py +90 -410
  172. agentpool_server/acp_server/session_manager.py +8 -34
  173. agentpool_server/agui_server/server.py +3 -1
  174. agentpool_server/mcp_server/server.py +5 -2
  175. agentpool_server/opencode_server/ENDPOINTS.md +53 -14
  176. agentpool_server/opencode_server/OPENCODE_UI_TOOLS_COMPLETE.md +202 -0
  177. agentpool_server/opencode_server/__init__.py +0 -8
  178. agentpool_server/opencode_server/converters.py +132 -26
  179. agentpool_server/opencode_server/input_provider.py +160 -8
  180. agentpool_server/opencode_server/models/__init__.py +42 -20
  181. agentpool_server/opencode_server/models/app.py +12 -0
  182. agentpool_server/opencode_server/models/events.py +203 -29
  183. agentpool_server/opencode_server/models/mcp.py +19 -0
  184. agentpool_server/opencode_server/models/message.py +18 -1
  185. agentpool_server/opencode_server/models/parts.py +134 -1
  186. agentpool_server/opencode_server/models/question.py +56 -0
  187. agentpool_server/opencode_server/models/session.py +13 -1
  188. agentpool_server/opencode_server/routes/__init__.py +4 -0
  189. agentpool_server/opencode_server/routes/agent_routes.py +33 -2
  190. agentpool_server/opencode_server/routes/app_routes.py +66 -3
  191. agentpool_server/opencode_server/routes/config_routes.py +66 -5
  192. agentpool_server/opencode_server/routes/file_routes.py +184 -5
  193. agentpool_server/opencode_server/routes/global_routes.py +1 -1
  194. agentpool_server/opencode_server/routes/lsp_routes.py +1 -1
  195. agentpool_server/opencode_server/routes/message_routes.py +122 -66
  196. agentpool_server/opencode_server/routes/permission_routes.py +63 -0
  197. agentpool_server/opencode_server/routes/pty_routes.py +23 -22
  198. agentpool_server/opencode_server/routes/question_routes.py +128 -0
  199. agentpool_server/opencode_server/routes/session_routes.py +139 -68
  200. agentpool_server/opencode_server/routes/tui_routes.py +1 -1
  201. agentpool_server/opencode_server/server.py +47 -2
  202. agentpool_server/opencode_server/state.py +30 -0
  203. agentpool_storage/__init__.py +0 -4
  204. agentpool_storage/base.py +81 -2
  205. agentpool_storage/claude_provider/ARCHITECTURE.md +433 -0
  206. agentpool_storage/claude_provider/__init__.py +42 -0
  207. agentpool_storage/{claude_provider.py → claude_provider/provider.py} +190 -8
  208. agentpool_storage/file_provider.py +149 -15
  209. agentpool_storage/memory_provider.py +132 -12
  210. agentpool_storage/opencode_provider/ARCHITECTURE.md +386 -0
  211. agentpool_storage/opencode_provider/__init__.py +16 -0
  212. agentpool_storage/opencode_provider/helpers.py +414 -0
  213. agentpool_storage/opencode_provider/provider.py +895 -0
  214. agentpool_storage/session_store.py +20 -6
  215. agentpool_storage/sql_provider/sql_provider.py +135 -2
  216. agentpool_storage/sql_provider/utils.py +2 -12
  217. agentpool_storage/zed_provider/__init__.py +16 -0
  218. agentpool_storage/zed_provider/helpers.py +281 -0
  219. agentpool_storage/zed_provider/models.py +130 -0
  220. agentpool_storage/zed_provider/provider.py +442 -0
  221. agentpool_storage/zed_provider.py +803 -0
  222. agentpool_toolsets/__init__.py +0 -2
  223. agentpool_toolsets/builtin/__init__.py +2 -4
  224. agentpool_toolsets/builtin/code.py +4 -4
  225. agentpool_toolsets/builtin/debug.py +115 -40
  226. agentpool_toolsets/builtin/execution_environment.py +54 -165
  227. agentpool_toolsets/builtin/skills.py +0 -77
  228. agentpool_toolsets/builtin/subagent_tools.py +64 -51
  229. agentpool_toolsets/builtin/workers.py +4 -2
  230. agentpool_toolsets/composio_toolset.py +2 -2
  231. agentpool_toolsets/entry_points.py +3 -1
  232. agentpool_toolsets/fsspec_toolset/grep.py +25 -5
  233. agentpool_toolsets/fsspec_toolset/helpers.py +3 -2
  234. agentpool_toolsets/fsspec_toolset/toolset.py +350 -66
  235. agentpool_toolsets/mcp_discovery/data/mcp_servers.parquet +0 -0
  236. agentpool_toolsets/mcp_discovery/toolset.py +74 -17
  237. agentpool_toolsets/mcp_run_toolset.py +8 -11
  238. agentpool_toolsets/notifications.py +33 -33
  239. agentpool_toolsets/openapi.py +3 -1
  240. agentpool_toolsets/search_toolset.py +3 -1
  241. agentpool_config/resources.py +0 -33
  242. agentpool_server/acp_server/acp_tools.py +0 -43
  243. agentpool_server/acp_server/commands/spawn.py +0 -210
  244. agentpool_storage/opencode_provider.py +0 -730
  245. agentpool_storage/text_log_provider.py +0 -276
  246. agentpool_toolsets/builtin/chain.py +0 -288
  247. agentpool_toolsets/builtin/user_interaction.py +0 -52
  248. agentpool_toolsets/semantic_memory_toolset.py +0 -536
  249. {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/entry_points.txt +0 -0
  250. {agentpool-2.2.3.dist-info → agentpool-2.5.0.dist-info}/licenses/LICENSE +0 -0
agentpool/agents/agent.py CHANGED
@@ -3,18 +3,15 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  import asyncio
6
- from collections.abc import Awaitable, Callable
7
- from contextlib import AsyncExitStack, asynccontextmanager, suppress
8
- from dataclasses import dataclass, field, replace
6
+ from collections.abc import Awaitable
7
+ from contextlib import AsyncExitStack, asynccontextmanager
8
+ from dataclasses import replace
9
9
  import time
10
10
  from typing import TYPE_CHECKING, Any, Self, TypedDict, TypeVar, overload
11
11
  from uuid import uuid4
12
12
 
13
13
  from anyenv import method_spawner
14
- import anyio
15
- from llmling_models import function_to_model, infer_model
16
14
  import logfire
17
- from psygnal import Signal
18
15
  from pydantic import ValidationError
19
16
  from pydantic._internal import _typing_extra
20
17
  from pydantic_ai import (
@@ -28,6 +25,7 @@ from pydantic_ai import (
28
25
  RunContext,
29
26
  ToolReturnPart,
30
27
  )
28
+ from pydantic_ai.models import Model
31
29
 
32
30
  from agentpool.agents.base_agent import BaseAgent
33
31
  from agentpool.agents.events import (
@@ -35,59 +33,27 @@ from agentpool.agents.events import (
35
33
  StreamCompleteEvent,
36
34
  ToolCallCompleteEvent,
37
35
  )
36
+ from agentpool.agents.events.processors import FileTracker
38
37
  from agentpool.agents.modes import ModeInfo
39
38
  from agentpool.log import get_logger
40
- from agentpool.messaging import ChatMessage, MessageHistory, MessageNode
41
- from agentpool.messaging.processing import prepare_prompts
39
+ from agentpool.messaging import ChatMessage, MessageHistory
42
40
  from agentpool.prompts.convert import convert_prompts
43
41
  from agentpool.storage import StorageManager
44
42
  from agentpool.tools import Tool, ToolManager
45
43
  from agentpool.tools.exceptions import ToolError
46
- from agentpool.utils.inspection import call_with_context, get_argument_key
47
- from agentpool.utils.now import get_now
44
+ from agentpool.utils.inspection import get_argument_key
48
45
  from agentpool.utils.pydantic_ai_helpers import safe_args_as_dict
49
46
  from agentpool.utils.result_utils import to_type
50
- from agentpool.utils.streams import FileTracker, merge_queue_into_iterator
51
-
52
-
53
- TResult = TypeVar("TResult")
54
-
55
-
56
- def _extract_text_from_messages(
57
- messages: list[Any], include_interruption_note: bool = False
58
- ) -> str:
59
- """Extract text content from pydantic-ai messages.
60
-
61
- Args:
62
- messages: List of ModelRequest/ModelResponse messages
63
- include_interruption_note: Whether to append interruption notice
64
-
65
- Returns:
66
- Concatenated text content from all ModelResponse TextParts
67
- """
68
- from pydantic_ai.messages import ModelResponse, TextPart as PydanticTextPart
69
-
70
- content = "".join(
71
- part.content
72
- for msg in messages
73
- if isinstance(msg, ModelResponse)
74
- for part in msg.parts
75
- if isinstance(part, PydanticTextPart)
76
- )
77
- if include_interruption_note:
78
- if content:
79
- content += "\n\n"
80
- content += "[Request interrupted by user]"
81
- return content
47
+ from agentpool.utils.streams import merge_queue_into_iterator
82
48
 
83
49
 
84
50
  if TYPE_CHECKING:
85
- from collections.abc import AsyncIterator, Coroutine, Sequence
86
- from datetime import datetime
51
+ from collections.abc import AsyncIterator, Callable, Coroutine, Sequence
87
52
  from types import TracebackType
88
53
 
89
54
  from exxec import ExecutionEnvironment
90
- from pydantic_ai import UsageLimits
55
+ from llmling_models_config import AnyModelConfig
56
+ from pydantic_ai import UsageLimits, UserContent
91
57
  from pydantic_ai.builtin_tools import AbstractBuiltinTool
92
58
  from pydantic_ai.output import OutputSpec
93
59
  from pydantic_ai.settings import ModelSettings
@@ -101,21 +67,22 @@ if TYPE_CHECKING:
101
67
  from agentpool.agents.events import RichAgentStreamEvent
102
68
  from agentpool.agents.modes import ModeCategory
103
69
  from agentpool.common_types import (
104
- AgentName,
105
70
  BuiltinEventHandlerType,
106
71
  EndStrategy,
107
72
  IndividualEventHandler,
108
73
  ModelType,
109
74
  ProcessorCallback,
110
- PromptCompatible,
111
75
  SessionIdType,
112
76
  ToolType,
113
77
  )
114
- from agentpool.delegation import AgentPool, Team, TeamRun
78
+ from agentpool.delegation import AgentPool
115
79
  from agentpool.hooks import AgentHooks
80
+ from agentpool.messaging import MessageNode
116
81
  from agentpool.models.agents import NativeAgentConfig, ToolMode
82
+ from agentpool.models.manifest import AgentsManifest
117
83
  from agentpool.prompts.prompts import PromptType
118
84
  from agentpool.resource_providers import ResourceProvider
85
+ from agentpool.tools.base import FunctionTool
119
86
  from agentpool.ui.base import InputProvider
120
87
  from agentpool_config.knowledge import Knowledge
121
88
  from agentpool_config.mcp_server import MCPServerConfig
@@ -128,6 +95,36 @@ logger = get_logger(__name__)
128
95
  # OutputDataT = TypeVar('OutputDataT', default=str, covariant=True)
129
96
  NoneType = type(None)
130
97
 
98
+ TResult = TypeVar("TResult")
99
+
100
+
101
+ def _extract_text_from_messages(
102
+ messages: list[Any], include_interruption_note: bool = False
103
+ ) -> str:
104
+ """Extract text content from pydantic-ai messages.
105
+
106
+ Args:
107
+ messages: List of ModelRequest/ModelResponse messages
108
+ include_interruption_note: Whether to append interruption notice
109
+
110
+ Returns:
111
+ Concatenated text content from all ModelResponse TextParts
112
+ """
113
+ from pydantic_ai.messages import ModelResponse, TextPart as PydanticTextPart
114
+
115
+ content = "".join(
116
+ part.content
117
+ for msg in messages
118
+ if isinstance(msg, ModelResponse)
119
+ for part in msg.parts
120
+ if isinstance(part, PydanticTextPart)
121
+ )
122
+ if include_interruption_note:
123
+ if content:
124
+ content += "\n\n"
125
+ content += "[Request interrupted by user]"
126
+ return content
127
+
131
128
 
132
129
  class AgentKwargs(TypedDict, total=False):
133
130
  """Keyword arguments for configuring an Agent instance."""
@@ -160,25 +157,13 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
160
157
  Generically typed with: Agent[Type of Dependencies, Type of Result]
161
158
  """
162
159
 
163
- @dataclass(frozen=True)
164
- class AgentReset:
165
- """Emitted when agent is reset."""
166
-
167
- agent_name: AgentName
168
- previous_tools: dict[str, bool]
169
- new_tools: dict[str, bool]
170
- timestamp: datetime = field(default_factory=get_now)
171
-
172
- run_failed = Signal(str, Exception)
173
- agent_reset = Signal(AgentReset)
174
-
175
- def __init__(
160
+ def __init__( # noqa: PLR0915
176
161
  # we dont use AgentKwargs here so that we can work with explicit ones in the ctor
177
162
  self,
178
163
  name: str = "agentpool",
179
164
  *,
180
165
  deps_type: type[TDeps] | None = None,
181
- model: ModelType = None,
166
+ model: ModelType,
182
167
  output_type: OutputSpec[OutputDataT] = str, # type: ignore[assignment]
183
168
  # context: AgentContext[TDeps] | None = None,
184
169
  session: SessionIdType | SessionQuery | MemoryConfig | bool | int = None,
@@ -250,18 +235,21 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
250
235
  hooks: AgentHooks instance for intercepting agent behavior at run and tool events
251
236
  tool_confirmation_mode: Tool confirmation mode
252
237
  builtin_tools: PydanticAI builtin tools (WebSearchTool, CodeExecutionTool, etc.)
253
- usage_limits: Usage limits for the agent
238
+ usage_limits: Per-request usage limits (applied to each run() call independently,
239
+ not cumulative across the session)
254
240
  providers: Model providers for model discovery (e.g., ["openai", "anthropic"]).
255
241
  Defaults to ["models.dev"] if not specified.
256
242
  commands: Slash commands
257
243
  """
244
+ from llmling_models_config import StringModelConfig
245
+
258
246
  from agentpool.agents.interactions import Interactions
259
247
  from agentpool.agents.sys_prompts import SystemPrompts
260
248
  from agentpool.models.agents import NativeAgentConfig
261
249
  from agentpool.prompts.conversion_manager import ConversionManager
250
+ from agentpool_commands.pool import CompactCommand
262
251
  from agentpool_config.session import MemoryConfig
263
252
 
264
- self._infinite = False
265
253
  self.deps_type = deps_type
266
254
  self.model_settings = model_settings
267
255
  memory_cfg = (
@@ -271,7 +259,10 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
271
259
  all_mcp_servers = list(mcp_servers) if mcp_servers else []
272
260
  if agent_config and agent_config.mcp_servers:
273
261
  all_mcp_servers.extend(agent_config.get_mcp_servers())
274
-
262
+ # Add CompactCommand - only makes sense for Native Agent (has own history)
263
+ # Other agents (ClaudeCode, ACP, AGUI) don't control their history directly
264
+ all_commands = list(commands) if commands else []
265
+ all_commands.append(CompactCommand())
275
266
  # Call base class with shared parameters
276
267
  super().__init__(
277
268
  name=name,
@@ -286,15 +277,28 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
286
277
  output_type=to_type(output_type), # type: ignore[arg-type]
287
278
  tool_confirmation_mode=tool_confirmation_mode,
288
279
  event_handlers=event_handlers,
289
- commands=commands,
280
+ commands=all_commands,
290
281
  )
291
282
 
292
283
  # Store config for context creation
293
- self._agent_config = agent_config or NativeAgentConfig(name=name)
284
+ # Convert model to proper config type for NativeAgentConfig
294
285
 
286
+ config_model: AnyModelConfig
287
+ if isinstance(model, Model):
288
+ config_model = StringModelConfig(
289
+ identifier=model.model_name,
290
+ **({"model_settings": model._settings} if model._settings else {}),
291
+ )
292
+ elif isinstance(model, str):
293
+ config_model = StringModelConfig(
294
+ identifier=model,
295
+ **({"model_settings": model_settings} if model_settings else {}),
296
+ )
297
+ else:
298
+ config_model = model
299
+ self._agent_config = agent_config or NativeAgentConfig(name=name, model=config_model)
295
300
  # Store builtin tools for pydantic-ai
296
301
  self._builtin_tools = list(builtin_tools) if builtin_tools else []
297
-
298
302
  # Override tools with Agent-specific ToolManager (with tools and tool_mode)
299
303
  all_tools = list(tools or [])
300
304
  self.tools = ToolManager(all_tools, tool_mode=tool_mode)
@@ -302,7 +306,6 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
302
306
  self.tools.add_provider(toolset_provider)
303
307
  aggregating_provider = self.mcp.get_aggregating_provider()
304
308
  self.tools.add_provider(aggregating_provider)
305
-
306
309
  # Override conversation with Agent-specific MessageHistory (with storage, etc.)
307
310
  resources = list(resources)
308
311
  if knowledge:
@@ -314,14 +317,17 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
314
317
  session_config=memory_cfg,
315
318
  resources=resources,
316
319
  )
317
- self._model = infer_model(model) if isinstance(model, str) else model
320
+ if isinstance(model, str):
321
+ self._model, settings = self._resolve_model_string(model)
322
+ if settings:
323
+ self.model_settings = settings
324
+ else:
325
+ self._model = model
318
326
  self._retries = retries
319
327
  self._end_strategy: EndStrategy = end_strategy
320
328
  self._output_retries = output_retries
321
329
  self.parallel_init = parallel_init
322
- self._background_task: asyncio.Task[ChatMessage[Any]] | None = None
323
330
  self.talk = Interactions(self)
324
-
325
331
  # Set up system prompts
326
332
  all_prompts: list[AnyPromptType] = []
327
333
  if isinstance(system_prompt, (list, tuple)):
@@ -329,13 +335,10 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
329
335
  elif system_prompt:
330
336
  all_prompts.append(system_prompt)
331
337
  self.sys_prompts = SystemPrompts(all_prompts, prompt_manager=self._manifest.prompt_manager)
332
-
333
338
  # Store hooks
334
339
  self.hooks = hooks
335
-
336
340
  # Store default usage limits
337
341
  self._default_usage_limits = usage_limits
338
-
339
342
  # Store providers for model discovery
340
343
  self._providers = list(providers) if providers else None
341
344
 
@@ -352,6 +355,143 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
352
355
  parts.extend([await self.tools.__prompt__(), self.conversation.__prompt__()])
353
356
  return "\n".join(parts)
354
357
 
358
+ @classmethod
359
+ def from_config( # noqa: PLR0915
360
+ cls,
361
+ config: NativeAgentConfig,
362
+ *,
363
+ name: str | None = None,
364
+ manifest: AgentsManifest | None = None,
365
+ event_handlers: Sequence[IndividualEventHandler | BuiltinEventHandlerType] | None = None,
366
+ input_provider: InputProvider | None = None,
367
+ agent_pool: AgentPool[Any] | None = None,
368
+ deps_type: type[TDeps] | None = None,
369
+ ) -> Self:
370
+ """Create a native Agent from a config object.
371
+
372
+ This is the preferred way to instantiate an Agent from configuration.
373
+ Handles system prompt resolution, model resolution, toolsets setup, etc.
374
+
375
+ Args:
376
+ config: Native agent configuration
377
+ name: Optional name override (used for manifest lookups, defaults to config.name)
378
+ manifest: Optional manifest for resolving prompts, models, output types.
379
+ If not provided, uses agent_pool.manifest or creates empty one.
380
+ event_handlers: Optional event handlers (merged with config handlers)
381
+ input_provider: Optional input provider for user interactions
382
+ agent_pool: Optional agent pool for coordination
383
+ deps_type: Optional dependency type
384
+
385
+ Returns:
386
+ Configured Agent instance
387
+ """
388
+ from pathlib import Path
389
+
390
+ from agentpool.models.manifest import AgentsManifest
391
+ from agentpool.utils.result_utils import to_type
392
+ from agentpool_config.system_prompts import (
393
+ FilePromptConfig,
394
+ FunctionPromptConfig,
395
+ LibraryPromptConfig,
396
+ StaticPromptConfig,
397
+ )
398
+
399
+ # Get manifest from pool or create empty one
400
+ if manifest is None:
401
+ manifest = agent_pool.manifest if agent_pool else AgentsManifest()
402
+
403
+ # Use provided name, fall back to config.name, then default
404
+ name = name or config.name or "agent"
405
+
406
+ # Normalize system_prompt to a list for iteration
407
+ sys_prompts: list[str] = []
408
+ prompt_source = config.system_prompt
409
+ if prompt_source is not None:
410
+ prompts_to_process = (
411
+ [prompt_source] if isinstance(prompt_source, str) else prompt_source
412
+ )
413
+ for prompt in prompts_to_process:
414
+ match prompt:
415
+ case (str() as sys_prompt) | StaticPromptConfig(content=sys_prompt):
416
+ sys_prompts.append(sys_prompt)
417
+ case FilePromptConfig(path=path, variables=variables):
418
+ template_path = Path(path)
419
+ if not template_path.is_absolute() and config.config_file_path:
420
+ template_path = Path(config.config_file_path).parent / path
421
+ template_content = template_path.read_text("utf-8")
422
+ if variables:
423
+ from jinja2 import Template
424
+
425
+ template = Template(template_content)
426
+ content = template.render(**variables)
427
+ else:
428
+ content = template_content
429
+ sys_prompts.append(content)
430
+ case LibraryPromptConfig(reference=reference):
431
+ try:
432
+ content = manifest.prompt_manager.get.sync(reference)
433
+ sys_prompts.append(content)
434
+ except Exception as e:
435
+ msg = f"Failed to load library prompt {reference!r} for agent {name}"
436
+ logger.exception(msg)
437
+ raise ValueError(msg) from e
438
+ case FunctionPromptConfig(function=function, arguments=arguments):
439
+ content = function(**arguments)
440
+ sys_prompts.append(content)
441
+
442
+ # Prepare toolsets list
443
+ toolsets_list = config.get_toolsets()
444
+ if config_tool_provider := config.get_tool_provider():
445
+ toolsets_list.append(config_tool_provider)
446
+ # Convert workers config to a toolset (backwards compatibility)
447
+ if config.workers:
448
+ from agentpool_toolsets.builtin.workers import WorkersTools
449
+
450
+ workers_provider = WorkersTools(workers=list(config.workers), name="workers")
451
+ toolsets_list.append(workers_provider)
452
+ # Resolve output type
453
+ agent_output_type = manifest.get_output_type(name) or str
454
+ resolved_output_type = to_type(agent_output_type, manifest.responses)
455
+ # Merge event handlers
456
+ config_handlers = config.get_event_handlers()
457
+ merged_handlers: list[IndividualEventHandler | BuiltinEventHandlerType] = [
458
+ *config_handlers,
459
+ *(event_handlers or []),
460
+ ]
461
+ # Resolve model
462
+ resolved_model = manifest.resolve_model(config.model)
463
+ model = resolved_model.get_model()
464
+ model_settings = resolved_model.get_model_settings()
465
+ # Extract builtin tools
466
+ builtin_tools = config.get_builtin_tools()
467
+ return cls(
468
+ model=model,
469
+ model_settings=model_settings,
470
+ system_prompt=sys_prompts,
471
+ name=name,
472
+ display_name=config.display_name,
473
+ deps_type=deps_type,
474
+ env=config.environment.get_provider() if config.environment else None,
475
+ description=config.description,
476
+ retries=config.retries,
477
+ session=config.get_session_config(),
478
+ output_retries=config.output_retries,
479
+ end_strategy=config.end_strategy,
480
+ agent_config=config,
481
+ input_provider=input_provider,
482
+ output_type=resolved_output_type, # type: ignore[arg-type]
483
+ event_handlers=merged_handlers or None,
484
+ agent_pool=agent_pool,
485
+ tool_mode=config.tool_mode,
486
+ knowledge=config.knowledge,
487
+ toolsets=toolsets_list,
488
+ hooks=config.hooks.get_agent_hooks() if config.hooks else None,
489
+ tool_confirmation_mode=config.requires_tool_confirmation,
490
+ builtin_tools=builtin_tools or None,
491
+ usage_limits=config.usage_limits,
492
+ providers=config.model_providers,
493
+ )
494
+
355
495
  async def __aenter__(self) -> Self:
356
496
  """Enter async context and set up MCP servers."""
357
497
  try:
@@ -379,57 +519,6 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
379
519
  """Exit async context."""
380
520
  await super().__aexit__(exc_type, exc_val, exc_tb)
381
521
 
382
- @overload
383
- def __and__( # if other doesnt define deps, we take the agents one
384
- self, other: ProcessorCallback[Any] | Team[TDeps] | Agent[TDeps, Any]
385
- ) -> Team[TDeps]: ...
386
-
387
- @overload
388
- def __and__( # otherwise, we dont know and deps is Any
389
- self, other: ProcessorCallback[Any] | Team[Any] | Agent[Any, Any]
390
- ) -> Team[Any]: ...
391
-
392
- def __and__(self, other: MessageNode[Any, Any] | ProcessorCallback[Any]) -> Team[Any]:
393
- """Create sequential team using & operator.
394
-
395
- Example:
396
- group = analyzer & planner & executor # Create group of 3
397
- group = analyzer & existing_group # Add to existing group
398
- """
399
- from agentpool.delegation.team import Team
400
-
401
- match other:
402
- case Team():
403
- return Team([self, *other.nodes])
404
- case Callable():
405
- agent_2 = Agent.from_callback(other)
406
- agent_2.agent_pool = self.agent_pool
407
- return Team([self, agent_2])
408
- case MessageNode():
409
- return Team([self, other])
410
- case _:
411
- msg = f"Invalid agent type: {type(other)}"
412
- raise ValueError(msg)
413
-
414
- @overload
415
- def __or__(self, other: MessageNode[TDeps, Any]) -> TeamRun[TDeps, Any]: ...
416
-
417
- @overload
418
- def __or__[TOtherDeps](self, other: MessageNode[TOtherDeps, Any]) -> TeamRun[Any, Any]: ...
419
-
420
- @overload
421
- def __or__(self, other: ProcessorCallback[Any]) -> TeamRun[Any, Any]: ...
422
-
423
- def __or__(self, other: MessageNode[Any, Any] | ProcessorCallback[Any]) -> TeamRun[Any, Any]:
424
- # Create new execution with sequential mode (for piping)
425
- from agentpool import TeamRun
426
-
427
- if callable(other):
428
- other = Agent.from_callback(other)
429
- other.agent_pool = self.agent_pool
430
-
431
- return TeamRun([self, other])
432
-
433
522
  @overload
434
523
  @classmethod
435
524
  def from_callback(
@@ -468,16 +557,18 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
468
557
  name: Optional name for the agent
469
558
  kwargs: Additional arguments for agent
470
559
  """
560
+ from llmling_models import function_to_model
561
+
471
562
  name = name or callback.__name__ or "processor"
472
563
  model = function_to_model(callback)
473
- return_type = _typing_extra.get_function_type_hints(callback).get("return")
564
+ output_type = _typing_extra.get_function_type_hints(callback).get("return")
474
565
  if ( # If async, unwrap from Awaitable
475
- return_type
476
- and hasattr(return_type, "__origin__")
477
- and return_type.__origin__ is Awaitable
566
+ output_type
567
+ and hasattr(output_type, "__origin__")
568
+ and output_type.__origin__ is Awaitable
478
569
  ):
479
- return_type = return_type.__args__[0]
480
- return Agent(model=model, name=name, output_type=return_type or str, **kwargs)
570
+ output_type = output_type.__args__[0]
571
+ return Agent(model=model, name=name, output_type=output_type or str, **kwargs)
481
572
 
482
573
  @property
483
574
  def name(self) -> str:
@@ -509,15 +600,34 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
509
600
  data=data,
510
601
  )
511
602
 
603
+ def _resolve_model_string(self, model: str) -> tuple[Model, ModelSettings | None]:
604
+ """Resolve a model string, checking variants first.
605
+
606
+ Args:
607
+ model: Model identifier or variant name
608
+
609
+ Returns:
610
+ Tuple of (Model instance, ModelSettings or None)
611
+ Settings are only returned for variants.
612
+ """
613
+ from llmling_models import infer_model
614
+
615
+ # Check if it's a variant
616
+ if self.agent_pool and model in self.agent_pool.manifest.model_variants:
617
+ config = self.agent_pool.manifest.model_variants[model]
618
+ return config.get_model(), config.get_model_settings()
619
+ # Regular model string - no settings
620
+ return infer_model(model), None
621
+
512
622
  def to_structured[NewOutputDataT](
513
623
  self,
514
624
  output_type: type[NewOutputDataT],
515
- *,
516
- tool_name: str | None = None,
517
- tool_description: str | None = None,
518
625
  ) -> Agent[TDeps, NewOutputDataT]:
519
626
  """Convert this agent to a structured agent.
520
627
 
628
+ Warning: This method mutates the agent in place and breaks caching.
629
+ Changing output type modifies tool definitions sent to the API.
630
+
521
631
  Args:
522
632
  output_type: Type for structured responses. Can be:
523
633
  - A Python type (Pydantic model)
@@ -525,23 +635,17 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
525
635
  tool_description: Optional override for result tool description
526
636
 
527
637
  Returns:
528
- Typed Agent
638
+ Self (same instance, not a copy)
529
639
  """
530
640
  self.log.debug("Setting result type", output_type=output_type)
531
641
  self._output_type = to_type(output_type) # type: ignore[assignment]
532
642
  return self # type: ignore
533
643
 
534
- def is_busy(self) -> bool:
535
- """Check if agent is currently processing tasks."""
536
- return bool(self.task_manager._pending_tasks or self._background_task)
537
-
538
644
  @property
539
645
  def model_name(self) -> str | None:
540
646
  """Get the model name in a consistent format (provider:model_name)."""
541
- if self._model:
542
- # Construct full model ID with provider prefix (e.g., "anthropic:claude-haiku-4-5")
543
- return f"{self._model.system}:{self._model.model_name}"
544
- return None
647
+ # Construct full model ID with provider prefix (e.g., "anthropic:claude-haiku-4-5")
648
+ return f"{self._model.system}:{self._model.model_name}" if self._model else None
545
649
 
546
650
  def to_tool(
547
651
  self,
@@ -552,7 +656,7 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
552
656
  pass_message_history: bool = False,
553
657
  parent: Agent[Any, Any] | None = None,
554
658
  **_kwargs: Any,
555
- ) -> Tool[OutputDataT]:
659
+ ) -> FunctionTool[OutputDataT]:
556
660
  """Create a tool from this agent.
557
661
 
558
662
  Args:
@@ -569,7 +673,7 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
569
673
  raise ToolError(msg)
570
674
 
571
675
  if reset_history_on_run:
572
- self.conversation.clear()
676
+ await self.conversation.clear()
573
677
 
574
678
  history = None
575
679
  if pass_message_history and parent:
@@ -602,8 +706,7 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
602
706
 
603
707
  async def get_agentlet[AgentOutputType](
604
708
  self,
605
- tool_choice: str | list[str] | None,
606
- model: ModelType,
709
+ model: ModelType | None,
607
710
  output_type: type[AgentOutputType] | None,
608
711
  input_provider: InputProvider | None = None,
609
712
  ) -> PydanticAgent[TDeps, AgentOutputType]:
@@ -612,10 +715,13 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
612
715
 
613
716
  from agentpool.agents.tool_wrapping import wrap_tool
614
717
 
615
- tools = await self.tools.get_tools(state="enabled", names=tool_choice)
718
+ tools = await self.tools.get_tools(state="enabled")
616
719
  final_type = to_type(output_type) if output_type not in [None, str] else self._output_type
617
720
  actual_model = model or self._model
618
- model_ = infer_model(actual_model) if isinstance(actual_model, str) else actual_model
721
+ if isinstance(actual_model, str):
722
+ model_, _settings = self._resolve_model_string(actual_model)
723
+ else:
724
+ model_ = actual_model
619
725
  agent = PydanticAgent(
620
726
  name=self.name,
621
727
  model=model_,
@@ -647,162 +753,30 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
647
753
 
648
754
  return agent # type: ignore[return-value]
649
755
 
650
- @overload
651
- async def run(
756
+ async def _stream_events( # noqa: PLR0915
652
757
  self,
653
- *prompts: PromptCompatible | ChatMessage[Any],
654
- output_type: None = None,
655
- model: ModelType = None,
656
- store_history: bool = True,
657
- tool_choice: str | list[str] | None = None,
658
- usage_limits: UsageLimits | None = None,
659
- message_id: str | None = None,
660
- conversation_id: str | None = None,
661
- message_history: MessageHistory | None = None,
662
- deps: TDeps | None = None,
663
- input_provider: InputProvider | None = None,
664
- wait_for_connections: bool | None = None,
665
- instructions: str | None = None,
666
- ) -> ChatMessage[OutputDataT]: ...
667
-
668
- @overload
669
- async def run[OutputTypeT](
670
- self,
671
- *prompts: PromptCompatible | ChatMessage[Any],
672
- output_type: type[OutputTypeT],
673
- model: ModelType = None,
674
- store_history: bool = True,
675
- tool_choice: str | list[str] | None = None,
676
- usage_limits: UsageLimits | None = None,
677
- message_id: str | None = None,
678
- conversation_id: str | None = None,
679
- message_history: MessageHistory | None = None,
680
- deps: TDeps | None = None,
681
- input_provider: InputProvider | None = None,
682
- wait_for_connections: bool | None = None,
683
- instructions: str | None = None,
684
- ) -> ChatMessage[OutputTypeT]: ...
685
-
686
- @method_spawner # type: ignore[misc]
687
- async def run(
688
- self,
689
- *prompts: PromptCompatible | ChatMessage[Any],
690
- output_type: type[Any] | None = None,
691
- model: ModelType = None,
692
- store_history: bool = True,
693
- tool_choice: str | list[str] | None = None,
694
- usage_limits: UsageLimits | None = None,
695
- message_id: str | None = None,
696
- conversation_id: str | None = None,
697
- message_history: MessageHistory | None = None,
698
- deps: TDeps | None = None,
699
- input_provider: InputProvider | None = None,
700
- wait_for_connections: bool | None = None,
701
- instructions: str | None = None,
702
- ) -> ChatMessage[Any]:
703
- """Run agent with prompt and get response.
704
-
705
- Args:
706
- prompts: User query or instruction
707
- output_type: Optional type for structured responses
708
- model: Optional model override
709
- store_history: Whether the message exchange should be added to the
710
- context window
711
- tool_choice: Filter tool choice by name
712
- usage_limits: Optional usage limits for the model
713
- message_id: Optional message id for the returned message.
714
- Automatically generated if not provided.
715
- conversation_id: Optional conversation id for the returned message.
716
- message_history: Optional MessageHistory object to
717
- use instead of agent's own conversation
718
- deps: Optional dependencies for the agent
719
- input_provider: Optional input provider for the agent
720
- wait_for_connections: Whether to wait for connected agents to complete
721
- instructions: Optional instructions to override the agent's system prompt
722
-
723
- Returns:
724
- Result containing response and run information
725
-
726
- Raises:
727
- UnexpectedModelBehavior: If the model fails or behaves unexpectedly
728
- """
729
- # Collect all events through run_stream
730
- final_message: ChatMessage[Any] | None = None
731
- async for event in self.run_stream(
732
- *prompts,
733
- output_type=output_type,
734
- model=model,
735
- store_history=store_history,
736
- tool_choice=tool_choice,
737
- usage_limits=usage_limits,
738
- message_id=message_id,
739
- conversation_id=conversation_id,
740
- message_history=message_history,
741
- deps=deps,
742
- input_provider=input_provider,
743
- wait_for_connections=wait_for_connections,
744
- instructions=instructions,
745
- ):
746
- if isinstance(event, StreamCompleteEvent):
747
- final_message = event.message
748
-
749
- if final_message is None:
750
- msg = "No final message received from stream"
751
- raise RuntimeError(msg)
752
-
753
- return final_message
754
-
755
- @method_spawner
756
- async def run_stream( # noqa: PLR0915
757
- self,
758
- *prompt: PromptCompatible,
759
- output_type: type[OutputDataT] | None = None,
760
- model: ModelType = None,
761
- tool_choice: str | list[str] | None = None,
758
+ prompts: list[UserContent],
759
+ *,
760
+ user_msg: ChatMessage[Any],
761
+ effective_parent_id: str | None,
762
762
  store_history: bool = True,
763
- usage_limits: UsageLimits | None = None,
764
763
  message_id: str | None = None,
765
764
  conversation_id: str | None = None,
765
+ parent_id: str | None = None,
766
766
  message_history: MessageHistory | None = None,
767
767
  input_provider: InputProvider | None = None,
768
768
  wait_for_connections: bool | None = None,
769
769
  deps: TDeps | None = None,
770
- instructions: str | None = None,
771
770
  event_handlers: Sequence[IndividualEventHandler | BuiltinEventHandlerType] | None = None,
772
771
  ) -> AsyncIterator[RichAgentStreamEvent[OutputDataT]]:
773
- """Run agent with prompt and get a streaming response.
774
-
775
- Args:
776
- prompt: User query or instruction
777
- output_type: Optional type for structured responses
778
- model: Optional model override
779
- tool_choice: Filter tool choice by name
780
- store_history: Whether the message exchange should be added to the
781
- context window
782
- usage_limits: Optional usage limits for the model
783
- message_id: Optional message id for the returned message.
784
- Automatically generated if not provided.
785
- conversation_id: Optional conversation id for the returned message.
786
- message_history: Optional MessageHistory to use instead of agent's own
787
- input_provider: Optional input provider for the agent
788
- wait_for_connections: Whether to wait for connected agents to complete
789
- deps: Optional dependencies for the agent
790
- instructions: Optional instructions to override the agent's system prompt
791
- event_handlers: Optional event handlers for this run (overrides agent's handlers)
772
+ from anyenv import MultiEventHandler
773
+ from pydantic_graph import End
792
774
 
793
- Returns:
794
- An async iterator yielding streaming events with final message embedded.
775
+ from agentpool.agents.events import resolve_event_handlers
795
776
 
796
- Raises:
797
- UnexpectedModelBehavior: If the model fails or behaves unexpectedly
798
- """
799
777
  conversation = message_history if message_history is not None else self.conversation
800
778
  # Use provided event handlers or fall back to agent's handlers
801
779
  if event_handlers is not None:
802
- from anyenv import MultiEventHandler
803
-
804
- from agentpool.agents.events import resolve_event_handlers
805
-
806
780
  handler: MultiEventHandler[IndividualEventHandler] = MultiEventHandler(
807
781
  resolve_event_handlers(event_handlers)
808
782
  )
@@ -810,18 +784,15 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
810
784
  handler = self.event_handler
811
785
  message_id = message_id or str(uuid4())
812
786
  run_id = str(uuid4())
813
- # Get parent_id from last message in history for tree structure
814
- last_msg_id = conversation.get_last_message_id()
815
- user_msg, prompts, original_message = await prepare_prompts(*prompt, parent_id=last_msg_id)
816
- self.message_received.emit(user_msg)
787
+ # Reset cancellation state
788
+ self._cancelled = False
789
+ # Initialize conversation_id on first run and log to storage
790
+ # Conversation ID initialization handled by BaseAgent
791
+ processed_prompts = prompts
792
+ await self.message_received.emit(user_msg)
817
793
  start_time = time.perf_counter()
818
794
  history_list = conversation.get_history()
819
795
  pending_parts = conversation.get_pending_parts()
820
-
821
- # Reset cancellation state and track current task
822
- self._cancelled = False
823
- self._current_stream_task = asyncio.current_task()
824
-
825
796
  # Execute pre-run hooks
826
797
  if self.hooks:
827
798
  pre_run_result = await self.hooks.run_pre_run_hooks(
@@ -829,159 +800,132 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
829
800
  prompt=user_msg.content
830
801
  if isinstance(user_msg.content, str)
831
802
  else str(user_msg.content),
832
- conversation_id=conversation_id,
803
+ conversation_id=self.conversation_id,
833
804
  )
834
805
  if pre_run_result.get("decision") == "deny":
835
806
  reason = pre_run_result.get("reason", "Blocked by pre-run hook")
836
807
  msg = f"Run blocked: {reason}"
837
808
  raise RuntimeError(msg)
838
809
 
810
+ assert self.conversation_id is not None # Initialized by BaseAgent.run_stream()
839
811
  run_started = RunStartedEvent(
840
812
  thread_id=self.conversation_id, run_id=run_id, agent_name=self.name
841
813
  )
842
814
  await handler(None, run_started)
843
815
  yield run_started
844
- try:
845
- from pydantic_graph import End
846
-
847
- agentlet = await self.get_agentlet(tool_choice, model, output_type, input_provider)
848
- content = await convert_prompts(prompts)
849
- response_msg: ChatMessage[Any] | None = None
850
- # Prepend pending context parts (content is already pydantic-ai format)
851
- converted = [*pending_parts, *content]
852
-
853
- # Use provided usage_limits or fall back to default
854
- effective_limits = usage_limits or self._default_usage_limits
855
- history = [m for run in history_list for m in run.to_pydantic_ai()]
856
-
857
- # Track tool call starts to combine with results later
858
- pending_tcs: dict[str, BaseToolCallPart] = {}
859
- file_tracker = FileTracker()
860
-
861
- async with agentlet.iter(
862
- converted,
863
- deps=deps, # type: ignore[arg-type]
864
- message_history=history,
865
- usage_limits=effective_limits,
866
- instructions=instructions,
867
- ) as agent_run:
868
- try:
869
- async for node in agent_run:
870
- if self._cancelled:
871
- self.log.info("Stream cancelled by user")
872
- break
873
- if isinstance(node, End):
874
- break
875
-
876
- # Stream events from model request node
877
- if isinstance(node, ModelRequestNode):
878
- async with (
879
- node.stream(agent_run.ctx) as agent_stream,
880
- merge_queue_into_iterator(
881
- agent_stream, # type: ignore[arg-type]
882
- self._event_queue,
883
- ) as merged,
884
- ):
885
- async for event in file_tracker.track(merged):
886
- if self._cancelled:
887
- break
888
- await handler(None, event)
889
- yield event
890
- combined = self._process_tool_event(
891
- event, pending_tcs, message_id
892
- )
893
- if combined:
894
- await handler(None, combined)
895
- yield combined
896
-
897
- # Stream events from tool call node
898
- elif isinstance(node, CallToolsNode):
899
- async with (
900
- node.stream(agent_run.ctx) as tool_stream,
901
- merge_queue_into_iterator(tool_stream, self._event_queue) as merged,
902
- ):
903
- async for event in file_tracker.track(merged):
904
- if self._cancelled:
905
- break
906
- await handler(None, event)
907
- yield event
908
- combined = self._process_tool_event(
909
- event, pending_tcs, message_id
910
- )
911
- if combined:
912
- await handler(None, combined)
913
- yield combined
914
- except asyncio.CancelledError:
915
- self.log.info("Stream cancelled via task cancellation")
916
- self._cancelled = True
917
-
918
- # Build response message
919
- response_time = time.perf_counter() - start_time
920
- if self._cancelled:
921
- partial_content = _extract_text_from_messages(
922
- agent_run.all_messages(), include_interruption_note=True
923
- )
924
- response_msg = ChatMessage(
925
- content=partial_content,
926
- role="assistant",
927
- name=self.name,
928
- message_id=message_id,
929
- conversation_id=conversation_id or user_msg.conversation_id,
930
- parent_id=user_msg.message_id,
931
- response_time=response_time,
932
- finish_reason="stop",
933
- )
934
- complete_event = StreamCompleteEvent(message=response_msg)
935
- await handler(None, complete_event)
936
- yield complete_event
937
- self._current_stream_task = None
938
- return
939
-
940
- if agent_run.result:
941
- response_msg = await ChatMessage.from_run_result(
942
- agent_run.result,
943
- agent_name=self.name,
944
- message_id=message_id,
945
- conversation_id=conversation_id or user_msg.conversation_id,
946
- parent_id=user_msg.message_id,
947
- response_time=response_time,
948
- metadata=file_tracker.get_metadata(),
949
- )
950
- else:
951
- msg = "Stream completed without producing a result"
952
- raise RuntimeError(msg) # noqa: TRY301
953
816
 
954
- # Execute post-run hooks
955
- if self.hooks:
956
- prompt_str = (
957
- user_msg.content if isinstance(user_msg.content, str) else str(user_msg.content)
817
+ agentlet = await self.get_agentlet(None, self._output_type, input_provider)
818
+ content = await convert_prompts(processed_prompts)
819
+ response_msg: ChatMessage[Any] | None = None
820
+ # Prepend pending context parts (content is already pydantic-ai format)
821
+ converted = [*pending_parts, *content]
822
+ history = [m for run in history_list for m in run.to_pydantic_ai()]
823
+ # Track tool call starts to combine with results later
824
+ pending_tcs: dict[str, BaseToolCallPart] = {}
825
+ file_tracker = FileTracker()
826
+ async with agentlet.iter(
827
+ converted,
828
+ deps=deps, # type: ignore[arg-type]
829
+ message_history=history,
830
+ usage_limits=self._default_usage_limits,
831
+ ) as agent_run:
832
+ try:
833
+ async for node in agent_run:
834
+ if self._cancelled:
835
+ self.log.info("Stream cancelled by user")
836
+ break
837
+ if isinstance(node, End):
838
+ break
839
+
840
+ # Stream events from model request node
841
+ if isinstance(node, ModelRequestNode):
842
+ async with (
843
+ node.stream(agent_run.ctx) as agent_stream,
844
+ merge_queue_into_iterator(
845
+ agent_stream, # type: ignore[arg-type]
846
+ self._event_queue,
847
+ ) as merged,
848
+ ):
849
+ async for event in file_tracker(merged):
850
+ if self._cancelled:
851
+ break
852
+ await handler(None, event)
853
+ yield event
854
+ combined = self._process_tool_event(event, pending_tcs, message_id)
855
+ if combined:
856
+ await handler(None, combined)
857
+ yield combined
858
+
859
+ # Stream events from tool call node
860
+ elif isinstance(node, CallToolsNode):
861
+ async with (
862
+ node.stream(agent_run.ctx) as tool_stream,
863
+ merge_queue_into_iterator(tool_stream, self._event_queue) as merged,
864
+ ):
865
+ async for event in file_tracker(merged):
866
+ if self._cancelled:
867
+ break
868
+ await handler(None, event)
869
+ yield event
870
+ combined = self._process_tool_event(event, pending_tcs, message_id)
871
+ if combined:
872
+ await handler(None, combined)
873
+ yield combined
874
+ except asyncio.CancelledError:
875
+ self.log.info("Stream cancelled via task cancellation")
876
+ self._cancelled = True
877
+
878
+ # Build response message
879
+ response_time = time.perf_counter() - start_time
880
+ if self._cancelled:
881
+ partial_content = _extract_text_from_messages(
882
+ agent_run.all_messages(), include_interruption_note=True
883
+ )
884
+ response_msg = ChatMessage(
885
+ content=partial_content,
886
+ role="assistant",
887
+ name=self.name,
888
+ message_id=message_id,
889
+ conversation_id=self.conversation_id,
890
+ parent_id=user_msg.message_id,
891
+ response_time=response_time,
892
+ finish_reason="stop",
958
893
  )
959
- await self.hooks.run_post_run_hooks(
894
+ complete_event = StreamCompleteEvent(message=response_msg)
895
+ await handler(None, complete_event)
896
+ yield complete_event
897
+ return
898
+
899
+ if agent_run.result:
900
+ response_msg = await ChatMessage.from_run_result(
901
+ agent_run.result,
960
902
  agent_name=self.name,
961
- prompt=prompt_str,
962
- result=response_msg.content,
963
- conversation_id=conversation_id,
903
+ message_id=message_id,
904
+ conversation_id=self.conversation_id,
905
+ parent_id=user_msg.message_id,
906
+ response_time=response_time,
907
+ metadata=file_tracker.get_metadata(),
964
908
  )
909
+ else:
910
+ msg = "Stream completed without producing a result"
911
+ raise RuntimeError(msg)
965
912
 
966
- # Apply forwarding logic if needed
967
- if original_message:
968
- response_msg = response_msg.forwarded(original_message)
969
- # Send additional enriched completion event
970
- complete_event = StreamCompleteEvent(message=response_msg)
971
- await handler(None, complete_event)
972
- yield complete_event
973
- self.message_sent.emit(response_msg)
974
- await self.log_message(response_msg)
975
- if store_history:
976
- conversation.add_chat_messages([user_msg, response_msg])
977
- await self.connections.route_message(response_msg, wait=wait_for_connections)
913
+ # Execute post-run hooks
914
+ if self.hooks:
915
+ prompt_str = (
916
+ user_msg.content if isinstance(user_msg.content, str) else str(user_msg.content)
917
+ )
918
+ await self.hooks.run_post_run_hooks(
919
+ agent_name=self.name,
920
+ prompt=prompt_str,
921
+ result=response_msg.content,
922
+ conversation_id=self.conversation_id,
923
+ )
978
924
 
979
- except Exception as e:
980
- self.log.exception("Agent stream failed")
981
- self.run_failed.emit("Agent stream failed", e)
982
- raise
983
- finally:
984
- self._current_stream_task = None
925
+ # Send additional enriched completion event
926
+ complete_event = StreamCompleteEvent(message=response_msg)
927
+ await handler(None, complete_event)
928
+ yield complete_event
985
929
 
986
930
  def _process_tool_event(
987
931
  self,
@@ -1018,45 +962,6 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
1018
962
  )
1019
963
  return None
1020
964
 
1021
- async def run_iter(
1022
- self,
1023
- *prompt_groups: Sequence[PromptCompatible],
1024
- output_type: type[OutputDataT] | None = None,
1025
- model: ModelType = None,
1026
- store_history: bool = True,
1027
- wait_for_connections: bool | None = None,
1028
- ) -> AsyncIterator[ChatMessage[OutputDataT]]:
1029
- """Run agent sequentially on multiple prompt groups.
1030
-
1031
- Args:
1032
- prompt_groups: Groups of prompts to process sequentially
1033
- output_type: Optional type for structured responses
1034
- model: Optional model override
1035
- store_history: Whether to store in conversation history
1036
- wait_for_connections: Whether to wait for connected agents
1037
-
1038
- Yields:
1039
- Response messages in sequence
1040
-
1041
- Example:
1042
- questions = [
1043
- ["What is your name?"],
1044
- ["How old are you?", image1],
1045
- ["Describe this image", image2],
1046
- ]
1047
- async for response in agent.run_iter(*questions):
1048
- print(response.content)
1049
- """
1050
- for prompts in prompt_groups:
1051
- response = await self.run(
1052
- *prompts,
1053
- output_type=output_type,
1054
- model=model,
1055
- store_history=store_history,
1056
- wait_for_connections=wait_for_connections,
1057
- )
1058
- yield response # pyright: ignore
1059
-
1060
965
  @method_spawner
1061
966
  async def run_job(
1062
967
  self,
@@ -1109,122 +1014,6 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
1109
1014
  msg = f"Task execution failed: {e}"
1110
1015
  raise JobError(msg) from e
1111
1016
 
1112
- async def run_in_background(
1113
- self,
1114
- *prompt: PromptCompatible,
1115
- max_count: int | None = None,
1116
- interval: float = 1.0,
1117
- **kwargs: Any,
1118
- ) -> asyncio.Task[ChatMessage[OutputDataT] | None]:
1119
- """Run agent continuously in background with prompt or dynamic prompt function.
1120
-
1121
- Args:
1122
- prompt: Static prompt or function that generates prompts
1123
- max_count: Maximum number of runs (None = infinite)
1124
- interval: Seconds between runs
1125
- **kwargs: Arguments passed to run()
1126
- """
1127
- self._infinite = max_count is None
1128
-
1129
- async def _continuous() -> ChatMessage[Any]:
1130
- count = 0
1131
- self.log.debug("Starting continuous run", max_count=max_count, interval=interval)
1132
- latest = None
1133
- while (max_count is None or count < max_count) and not self._cancelled:
1134
- try:
1135
- agent_ctx = self.get_context()
1136
- current_prompts = [
1137
- call_with_context(p, agent_ctx, **kwargs) if callable(p) else p
1138
- for p in prompt
1139
- ]
1140
- self.log.debug("Generated prompt", iteration=count)
1141
- latest = await self.run(current_prompts, **kwargs)
1142
- self.log.debug("Run continuous result", iteration=count)
1143
-
1144
- count += 1
1145
- await anyio.sleep(interval)
1146
- except asyncio.CancelledError:
1147
- self.log.debug("Continuous run cancelled")
1148
- break
1149
- except Exception:
1150
- # Check if we were cancelled (may surface as other exceptions)
1151
- if self._cancelled:
1152
- self.log.debug("Continuous run cancelled via flag")
1153
- break
1154
- count += 1
1155
- self.log.exception("Background run failed")
1156
- await anyio.sleep(interval)
1157
- self.log.debug("Continuous run completed", iterations=count)
1158
- return latest # type: ignore[return-value]
1159
-
1160
- await self.stop() # Cancel any existing background task
1161
- self._cancelled = False # Reset cancellation flag for new run
1162
- task = asyncio.create_task(_continuous(), name=f"background_{self.name}")
1163
- self.log.debug("Started background task", task_name=task.get_name())
1164
- self._background_task = task
1165
- return task
1166
-
1167
- async def stop(self) -> None:
1168
- """Stop continuous execution if running."""
1169
- self._cancelled = True # Signal cancellation via flag
1170
- if self._background_task and not self._background_task.done():
1171
- self._background_task.cancel()
1172
- with suppress(asyncio.CancelledError): # Expected when we cancel the task
1173
- await self._background_task
1174
- self._background_task = None
1175
-
1176
- async def wait(self) -> ChatMessage[OutputDataT]:
1177
- """Wait for background execution to complete."""
1178
- if not self._background_task:
1179
- msg = "No background task running"
1180
- raise RuntimeError(msg)
1181
- if self._infinite:
1182
- msg = "Cannot wait on infinite execution"
1183
- raise RuntimeError(msg)
1184
- try:
1185
- return await self._background_task
1186
- finally:
1187
- self._background_task = None
1188
-
1189
- async def share(
1190
- self,
1191
- target: Agent[TDeps, Any],
1192
- *,
1193
- tools: list[str] | None = None,
1194
- history: bool | int | None = None, # bool or number of messages
1195
- token_limit: int | None = None,
1196
- ) -> None:
1197
- """Share capabilities and knowledge with another agent.
1198
-
1199
- Args:
1200
- target: Agent to share with
1201
- tools: List of tool names to share
1202
- history: Share conversation history:
1203
- - True: Share full history
1204
- - int: Number of most recent messages to share
1205
- - None: Don't share history
1206
- token_limit: Optional max tokens for history
1207
-
1208
- Raises:
1209
- ValueError: If requested items don't exist
1210
- RuntimeError: If runtime not available for resources
1211
- """
1212
- # Share tools if requested
1213
- for name in tools or []:
1214
- tool = await self.tools.get_tool(name)
1215
- meta = {"shared_from": self.name}
1216
- target.tools.register_tool(tool.callable, metadata=meta)
1217
-
1218
- # Share history if requested
1219
- if history:
1220
- history_text = await self.conversation.format_history(
1221
- max_tokens=token_limit,
1222
- num_messages=history if isinstance(history, int) else None,
1223
- )
1224
- target.conversation.add_context_message(
1225
- history_text, source=self.name, metadata={"type": "shared_history"}
1226
- )
1227
-
1228
1017
  def register_worker(
1229
1018
  self,
1230
1019
  worker: MessageNode[Any, Any],
@@ -1242,14 +1031,19 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
1242
1031
  parent=self if pass_message_history else None,
1243
1032
  )
1244
1033
 
1245
- async def set_model(self, model: ModelType) -> None:
1034
+ async def set_model(self, model: Model | str) -> None:
1246
1035
  """Set the model for this agent.
1247
1036
 
1248
1037
  Args:
1249
1038
  model: New model to use (name or instance)
1250
1039
 
1251
1040
  """
1252
- self._model = infer_model(model) if isinstance(model, str) else model
1041
+ if isinstance(model, str):
1042
+ self._model, settings = self._resolve_model_string(model)
1043
+ if settings:
1044
+ self.model_settings = settings
1045
+ else:
1046
+ self._model = model
1253
1047
 
1254
1048
  async def set_tool_confirmation_mode(self, mode: ToolConfirmationMode) -> None:
1255
1049
  """Set the tool confirmation mode for this agent.
@@ -1263,20 +1057,6 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
1263
1057
  self.tool_confirmation_mode = mode
1264
1058
  self.log.info("Tool confirmation mode changed", mode=mode)
1265
1059
 
1266
- async def reset(self) -> None:
1267
- """Reset agent state (conversation history and tool states)."""
1268
- old_tools = await self.tools.list_tools()
1269
- self.conversation.clear()
1270
- await self.tools.reset_states()
1271
- new_tools = await self.tools.list_tools()
1272
-
1273
- event = self.AgentReset(
1274
- agent_name=self.name,
1275
- previous_tools=old_tools,
1276
- new_tools=new_tools,
1277
- )
1278
- self.agent_reset.emit(event)
1279
-
1280
1060
  @asynccontextmanager
1281
1061
  async def temporary_state[T](
1282
1062
  self,
@@ -1305,6 +1085,7 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
1305
1085
  model: Temporary model override
1306
1086
  """
1307
1087
  old_model = self._model
1088
+ old_settings = self.model_settings
1308
1089
  if output_type:
1309
1090
  old_type = self._output_type
1310
1091
  self.to_structured(output_type)
@@ -1327,14 +1108,21 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
1327
1108
  if pause_routing: # Routing
1328
1109
  await stack.enter_async_context(self.connections.paused_routing())
1329
1110
 
1330
- elif model is not None: # Model
1331
- self._model = infer_model(model) if isinstance(model, str) else model
1111
+ if model is not None: # Model
1112
+ if isinstance(model, str):
1113
+ self._model, settings = self._resolve_model_string(model)
1114
+ if settings:
1115
+ self.model_settings = settings
1116
+ else:
1117
+ self._model = model
1332
1118
 
1333
1119
  try:
1334
1120
  yield self
1335
- finally: # Restore model
1336
- if model is not None and old_model:
1337
- self._model = old_model
1121
+ finally: # Restore model and settings
1122
+ if model is not None:
1123
+ if old_model:
1124
+ self._model = old_model
1125
+ self.model_settings = old_settings
1338
1126
  if output_type:
1339
1127
  self.to_structured(old_type)
1340
1128
 
@@ -1375,17 +1163,19 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
1375
1163
  self.log.exception("Failed to discover models")
1376
1164
  return None
1377
1165
 
1378
- def get_modes(self) -> list[ModeCategory]:
1166
+ async def get_modes(self) -> list[ModeCategory]:
1379
1167
  """Get available mode categories for this agent.
1380
1168
 
1381
- Native agents expose tool confirmation modes.
1169
+ Native agents expose permission modes and model selection.
1382
1170
 
1383
1171
  Returns:
1384
- List with single ModeCategory for tool confirmation
1172
+ List of ModeCategory for permissions and models
1385
1173
  """
1386
1174
  from agentpool.agents.modes import ModeCategory, ModeInfo
1387
1175
 
1388
- # Map current confirmation mode to mode ID
1176
+ categories: list[ModeCategory] = []
1177
+
1178
+ # Permission modes
1389
1179
  mode_id_map = {
1390
1180
  "per_tool": "default",
1391
1181
  "always": "default",
@@ -1393,67 +1183,105 @@ class Agent[TDeps = None, OutputDataT = str](BaseAgent[TDeps, OutputDataT]):
1393
1183
  }
1394
1184
  current_id = mode_id_map.get(self.tool_confirmation_mode, "default")
1395
1185
 
1396
- category_id = "permissions"
1397
- return [
1186
+ categories.append(
1398
1187
  ModeCategory(
1399
- id=category_id,
1188
+ id="permissions",
1400
1189
  name="Permissions",
1401
1190
  available_modes=[
1402
1191
  ModeInfo(
1403
1192
  id="default",
1404
1193
  name="Default",
1405
1194
  description="Require confirmation for tools marked as needing it",
1406
- category_id=category_id,
1195
+ category_id="permissions",
1407
1196
  ),
1408
1197
  ModeInfo(
1409
1198
  id="acceptEdits",
1410
1199
  name="Accept Edits",
1411
1200
  description="Auto-approve all tool calls without confirmation",
1412
- category_id=category_id,
1201
+ category_id="permissions",
1413
1202
  ),
1414
1203
  ],
1415
1204
  current_mode_id=current_id,
1205
+ category="mode",
1416
1206
  )
1417
- ]
1207
+ )
1208
+
1209
+ # Model selection
1210
+ models = await self.get_available_models()
1211
+ if models:
1212
+ current_model = self.model_name or (models[0].id if models else "")
1213
+ categories.append(
1214
+ ModeCategory(
1215
+ id="model",
1216
+ name="Model",
1217
+ available_modes=[
1218
+ ModeInfo(
1219
+ id=m.id,
1220
+ name=m.name or m.id,
1221
+ description=m.description or "",
1222
+ category_id="model",
1223
+ )
1224
+ for m in models
1225
+ ],
1226
+ current_mode_id=current_model,
1227
+ category="model",
1228
+ )
1229
+ )
1230
+
1231
+ return categories
1418
1232
 
1419
1233
  async def set_mode(self, mode: ModeInfo | str, category_id: str | None = None) -> None:
1420
1234
  """Set a mode for this agent.
1421
1235
 
1422
- Native agents support the "permissions" category with modes:
1423
- - "default": per_tool confirmation
1424
- - "acceptEdits": never confirm (auto-approve)
1236
+ Native agents support:
1237
+ - "permissions" category: default, acceptEdits
1238
+ - "model" category: any available model ID
1425
1239
 
1426
1240
  Args:
1427
1241
  mode: Mode to activate - ModeInfo object or mode ID string
1428
- category_id: Optional category (only "permissions" supported)
1242
+ category_id: Category ID ("permissions" or "model")
1429
1243
 
1430
1244
  Raises:
1431
- ValueError: If mode_id is invalid
1245
+ ValueError: If mode_id or category_id is invalid
1432
1246
  """
1433
1247
  # Extract mode_id and category from ModeInfo if provided
1434
1248
  if isinstance(mode, ModeInfo):
1435
1249
  mode_id = mode.id
1436
- category_id = category_id or mode.category_id or None
1250
+ category_id = category_id or mode.category_id
1437
1251
  else:
1438
1252
  mode_id = mode
1439
1253
 
1440
- # Validate category if provided
1441
- if category_id is not None and category_id != "permissions":
1442
- msg = f"Unknown category: {category_id}. Only 'permissions' is supported."
1443
- raise ValueError(msg)
1444
-
1445
- # Map mode_id to confirmation mode
1446
- mode_map: dict[str, ToolConfirmationMode] = {
1447
- "default": "per_tool",
1448
- "acceptEdits": "never",
1449
- }
1254
+ # Default to permissions if no category specified
1255
+ if category_id is None:
1256
+ category_id = "permissions"
1257
+
1258
+ if category_id == "permissions":
1259
+ # Map mode_id to confirmation mode
1260
+ mode_map: dict[str, ToolConfirmationMode] = {
1261
+ "default": "per_tool",
1262
+ "acceptEdits": "never",
1263
+ }
1264
+ if mode_id not in mode_map:
1265
+ msg = f"Unknown permission mode: {mode_id}. Available: {list(mode_map.keys())}"
1266
+ raise ValueError(msg)
1267
+ await self.set_tool_confirmation_mode(mode_map[mode_id])
1268
+
1269
+ elif category_id == "model":
1270
+ # Validate model exists
1271
+ models = await self.get_available_models()
1272
+ if models:
1273
+ valid_ids = {m.id for m in models}
1274
+ if mode_id not in valid_ids:
1275
+ msg = f"Unknown model: {mode_id}. Available: {valid_ids}"
1276
+ raise ValueError(msg)
1277
+ # Set the model using set_model method
1278
+ await self.set_model(mode_id)
1279
+ self.log.info("Model changed", model=mode_id)
1450
1280
 
1451
- if mode_id not in mode_map:
1452
- msg = f"Unknown mode: {mode_id}. Available: {list(mode_map.keys())}"
1281
+ else:
1282
+ msg = f"Unknown category: {category_id}. Available: permissions, model"
1453
1283
  raise ValueError(msg)
1454
1284
 
1455
- await self.set_tool_confirmation_mode(mode_map[mode_id])
1456
-
1457
1285
 
1458
1286
  if __name__ == "__main__":
1459
1287
  import logging
@@ -1468,4 +1296,4 @@ if __name__ == "__main__":
1468
1296
  print(f"[EVENT] {type(event).__name__}: {event}")
1469
1297
 
1470
1298
  agent = Agent(model=_model, tools=["webbrowser.open"], event_handlers=[handle_events])
1471
- result = agent.run.sync(sys_prompt) # type: ignore[attr-defined]
1299
+ result = agent.run.sync(sys_prompt)