newcode 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (289)
  1. code_puppy/__init__.py +10 -0
  2. code_puppy/__main__.py +10 -0
  3. code_puppy/agents/__init__.py +31 -0
  4. code_puppy/agents/agent_c_reviewer.py +155 -0
  5. code_puppy/agents/agent_code_puppy.py +147 -0
  6. code_puppy/agents/agent_code_reviewer.py +90 -0
  7. code_puppy/agents/agent_cpp_reviewer.py +132 -0
  8. code_puppy/agents/agent_creator_agent.py +630 -0
  9. code_puppy/agents/agent_golang_reviewer.py +151 -0
  10. code_puppy/agents/agent_helios.py +122 -0
  11. code_puppy/agents/agent_javascript_reviewer.py +160 -0
  12. code_puppy/agents/agent_manager.py +742 -0
  13. code_puppy/agents/agent_pack_leader.py +380 -0
  14. code_puppy/agents/agent_planning.py +165 -0
  15. code_puppy/agents/agent_python_programmer.py +167 -0
  16. code_puppy/agents/agent_python_reviewer.py +90 -0
  17. code_puppy/agents/agent_qa_expert.py +163 -0
  18. code_puppy/agents/agent_qa_kitten.py +208 -0
  19. code_puppy/agents/agent_scheduler.py +121 -0
  20. code_puppy/agents/agent_security_auditor.py +181 -0
  21. code_puppy/agents/agent_terminal_qa.py +323 -0
  22. code_puppy/agents/agent_typescript_reviewer.py +166 -0
  23. code_puppy/agents/base_agent.py +2145 -0
  24. code_puppy/agents/event_stream_handler.py +348 -0
  25. code_puppy/agents/json_agent.py +202 -0
  26. code_puppy/agents/pack/__init__.py +34 -0
  27. code_puppy/agents/pack/bloodhound.py +296 -0
  28. code_puppy/agents/pack/husky.py +307 -0
  29. code_puppy/agents/pack/retriever.py +380 -0
  30. code_puppy/agents/pack/shepherd.py +327 -0
  31. code_puppy/agents/pack/terrier.py +281 -0
  32. code_puppy/agents/pack/watchdog.py +357 -0
  33. code_puppy/agents/prompt_reviewer.py +145 -0
  34. code_puppy/agents/subagent_stream_handler.py +276 -0
  35. code_puppy/api/__init__.py +13 -0
  36. code_puppy/api/app.py +169 -0
  37. code_puppy/api/main.py +21 -0
  38. code_puppy/api/pty_manager.py +453 -0
  39. code_puppy/api/routers/__init__.py +12 -0
  40. code_puppy/api/routers/agents.py +36 -0
  41. code_puppy/api/routers/commands.py +217 -0
  42. code_puppy/api/routers/config.py +75 -0
  43. code_puppy/api/routers/sessions.py +234 -0
  44. code_puppy/api/templates/terminal.html +361 -0
  45. code_puppy/api/websocket.py +154 -0
  46. code_puppy/callbacks.py +674 -0
  47. code_puppy/chatgpt_codex_client.py +338 -0
  48. code_puppy/claude_cache_client.py +664 -0
  49. code_puppy/cli_runner.py +1038 -0
  50. code_puppy/command_line/__init__.py +1 -0
  51. code_puppy/command_line/add_model_menu.py +1092 -0
  52. code_puppy/command_line/agent_menu.py +662 -0
  53. code_puppy/command_line/attachments.py +395 -0
  54. code_puppy/command_line/autosave_menu.py +704 -0
  55. code_puppy/command_line/clipboard.py +527 -0
  56. code_puppy/command_line/colors_menu.py +526 -0
  57. code_puppy/command_line/command_handler.py +283 -0
  58. code_puppy/command_line/command_registry.py +150 -0
  59. code_puppy/command_line/config_commands.py +719 -0
  60. code_puppy/command_line/core_commands.py +853 -0
  61. code_puppy/command_line/diff_menu.py +865 -0
  62. code_puppy/command_line/file_path_completion.py +73 -0
  63. code_puppy/command_line/load_context_completion.py +52 -0
  64. code_puppy/command_line/mcp/__init__.py +10 -0
  65. code_puppy/command_line/mcp/base.py +32 -0
  66. code_puppy/command_line/mcp/catalog_server_installer.py +175 -0
  67. code_puppy/command_line/mcp/custom_server_form.py +688 -0
  68. code_puppy/command_line/mcp/custom_server_installer.py +195 -0
  69. code_puppy/command_line/mcp/edit_command.py +148 -0
  70. code_puppy/command_line/mcp/handler.py +138 -0
  71. code_puppy/command_line/mcp/help_command.py +147 -0
  72. code_puppy/command_line/mcp/install_command.py +214 -0
  73. code_puppy/command_line/mcp/install_menu.py +705 -0
  74. code_puppy/command_line/mcp/list_command.py +94 -0
  75. code_puppy/command_line/mcp/logs_command.py +235 -0
  76. code_puppy/command_line/mcp/remove_command.py +82 -0
  77. code_puppy/command_line/mcp/restart_command.py +100 -0
  78. code_puppy/command_line/mcp/search_command.py +123 -0
  79. code_puppy/command_line/mcp/start_all_command.py +135 -0
  80. code_puppy/command_line/mcp/start_command.py +117 -0
  81. code_puppy/command_line/mcp/status_command.py +184 -0
  82. code_puppy/command_line/mcp/stop_all_command.py +112 -0
  83. code_puppy/command_line/mcp/stop_command.py +80 -0
  84. code_puppy/command_line/mcp/test_command.py +107 -0
  85. code_puppy/command_line/mcp/utils.py +129 -0
  86. code_puppy/command_line/mcp/wizard_utils.py +334 -0
  87. code_puppy/command_line/mcp_completion.py +174 -0
  88. code_puppy/command_line/model_picker_completion.py +197 -0
  89. code_puppy/command_line/model_settings_menu.py +932 -0
  90. code_puppy/command_line/motd.py +91 -0
  91. code_puppy/command_line/onboarding_slides.py +179 -0
  92. code_puppy/command_line/onboarding_wizard.py +342 -0
  93. code_puppy/command_line/pin_command_completion.py +329 -0
  94. code_puppy/command_line/prompt_toolkit_completion.py +846 -0
  95. code_puppy/command_line/session_commands.py +302 -0
  96. code_puppy/command_line/skills_completion.py +160 -0
  97. code_puppy/command_line/uc_menu.py +893 -0
  98. code_puppy/command_line/utils.py +93 -0
  99. code_puppy/command_line/wiggum_state.py +78 -0
  100. code_puppy/config.py +1787 -0
  101. code_puppy/error_logging.py +133 -0
  102. code_puppy/gemini_code_assist.py +385 -0
  103. code_puppy/gemini_model.py +754 -0
  104. code_puppy/hook_engine/README.md +105 -0
  105. code_puppy/hook_engine/__init__.py +15 -0
  106. code_puppy/hook_engine/aliases.py +155 -0
  107. code_puppy/hook_engine/engine.py +195 -0
  108. code_puppy/hook_engine/executor.py +293 -0
  109. code_puppy/hook_engine/matcher.py +145 -0
  110. code_puppy/hook_engine/models.py +222 -0
  111. code_puppy/hook_engine/registry.py +106 -0
  112. code_puppy/hook_engine/validator.py +141 -0
  113. code_puppy/http_utils.py +361 -0
  114. code_puppy/keymap.py +128 -0
  115. code_puppy/main.py +10 -0
  116. code_puppy/mcp_/__init__.py +66 -0
  117. code_puppy/mcp_/async_lifecycle.py +286 -0
  118. code_puppy/mcp_/blocking_startup.py +469 -0
  119. code_puppy/mcp_/captured_stdio_server.py +275 -0
  120. code_puppy/mcp_/circuit_breaker.py +290 -0
  121. code_puppy/mcp_/config_wizard.py +507 -0
  122. code_puppy/mcp_/dashboard.py +308 -0
  123. code_puppy/mcp_/error_isolation.py +407 -0
  124. code_puppy/mcp_/examples/retry_example.py +226 -0
  125. code_puppy/mcp_/health_monitor.py +589 -0
  126. code_puppy/mcp_/managed_server.py +428 -0
  127. code_puppy/mcp_/manager.py +807 -0
  128. code_puppy/mcp_/mcp_logs.py +224 -0
  129. code_puppy/mcp_/registry.py +451 -0
  130. code_puppy/mcp_/retry_manager.py +337 -0
  131. code_puppy/mcp_/server_registry_catalog.py +1126 -0
  132. code_puppy/mcp_/status_tracker.py +355 -0
  133. code_puppy/mcp_/system_tools.py +209 -0
  134. code_puppy/mcp_prompts/__init__.py +1 -0
  135. code_puppy/mcp_prompts/hook_creator.py +103 -0
  136. code_puppy/messaging/__init__.py +255 -0
  137. code_puppy/messaging/bus.py +613 -0
  138. code_puppy/messaging/commands.py +167 -0
  139. code_puppy/messaging/markdown_patches.py +57 -0
  140. code_puppy/messaging/message_queue.py +361 -0
  141. code_puppy/messaging/messages.py +569 -0
  142. code_puppy/messaging/queue_console.py +271 -0
  143. code_puppy/messaging/renderers.py +311 -0
  144. code_puppy/messaging/rich_renderer.py +1153 -0
  145. code_puppy/messaging/spinner/__init__.py +83 -0
  146. code_puppy/messaging/spinner/console_spinner.py +240 -0
  147. code_puppy/messaging/spinner/spinner_base.py +96 -0
  148. code_puppy/messaging/subagent_console.py +460 -0
  149. code_puppy/model_factory.py +848 -0
  150. code_puppy/model_switching.py +63 -0
  151. code_puppy/model_utils.py +168 -0
  152. code_puppy/models.json +130 -0
  153. code_puppy/models_dev_api.json +1 -0
  154. code_puppy/models_dev_parser.py +592 -0
  155. code_puppy/plugins/__init__.py +186 -0
  156. code_puppy/plugins/agent_skills/__init__.py +22 -0
  157. code_puppy/plugins/agent_skills/config.py +175 -0
  158. code_puppy/plugins/agent_skills/discovery.py +136 -0
  159. code_puppy/plugins/agent_skills/downloader.py +392 -0
  160. code_puppy/plugins/agent_skills/installer.py +22 -0
  161. code_puppy/plugins/agent_skills/metadata.py +219 -0
  162. code_puppy/plugins/agent_skills/prompt_builder.py +100 -0
  163. code_puppy/plugins/agent_skills/register_callbacks.py +241 -0
  164. code_puppy/plugins/agent_skills/remote_catalog.py +322 -0
  165. code_puppy/plugins/agent_skills/skill_catalog.py +257 -0
  166. code_puppy/plugins/agent_skills/skills_install_menu.py +664 -0
  167. code_puppy/plugins/agent_skills/skills_menu.py +781 -0
  168. code_puppy/plugins/antigravity_oauth/__init__.py +10 -0
  169. code_puppy/plugins/antigravity_oauth/accounts.py +406 -0
  170. code_puppy/plugins/antigravity_oauth/antigravity_model.py +706 -0
  171. code_puppy/plugins/antigravity_oauth/config.py +42 -0
  172. code_puppy/plugins/antigravity_oauth/constants.py +133 -0
  173. code_puppy/plugins/antigravity_oauth/oauth.py +478 -0
  174. code_puppy/plugins/antigravity_oauth/register_callbacks.py +518 -0
  175. code_puppy/plugins/antigravity_oauth/storage.py +288 -0
  176. code_puppy/plugins/antigravity_oauth/test_plugin.py +319 -0
  177. code_puppy/plugins/antigravity_oauth/token.py +167 -0
  178. code_puppy/plugins/antigravity_oauth/transport.py +863 -0
  179. code_puppy/plugins/antigravity_oauth/utils.py +168 -0
  180. code_puppy/plugins/chatgpt_oauth/__init__.py +8 -0
  181. code_puppy/plugins/chatgpt_oauth/config.py +52 -0
  182. code_puppy/plugins/chatgpt_oauth/oauth_flow.py +328 -0
  183. code_puppy/plugins/chatgpt_oauth/register_callbacks.py +176 -0
  184. code_puppy/plugins/chatgpt_oauth/test_plugin.py +295 -0
  185. code_puppy/plugins/chatgpt_oauth/utils.py +499 -0
  186. code_puppy/plugins/claude_code_hooks/__init__.py +1 -0
  187. code_puppy/plugins/claude_code_hooks/config.py +131 -0
  188. code_puppy/plugins/claude_code_hooks/register_callbacks.py +163 -0
  189. code_puppy/plugins/claude_code_oauth/README.md +167 -0
  190. code_puppy/plugins/claude_code_oauth/SETUP.md +93 -0
  191. code_puppy/plugins/claude_code_oauth/__init__.py +25 -0
  192. code_puppy/plugins/claude_code_oauth/config.py +52 -0
  193. code_puppy/plugins/claude_code_oauth/register_callbacks.py +453 -0
  194. code_puppy/plugins/claude_code_oauth/test_plugin.py +283 -0
  195. code_puppy/plugins/claude_code_oauth/token_refresh_heartbeat.py +241 -0
  196. code_puppy/plugins/claude_code_oauth/utils.py +601 -0
  197. code_puppy/plugins/customizable_commands/__init__.py +0 -0
  198. code_puppy/plugins/customizable_commands/register_callbacks.py +152 -0
  199. code_puppy/plugins/example_custom_command/README.md +280 -0
  200. code_puppy/plugins/example_custom_command/register_callbacks.py +48 -0
  201. code_puppy/plugins/file_permission_handler/__init__.py +4 -0
  202. code_puppy/plugins/file_permission_handler/register_callbacks.py +528 -0
  203. code_puppy/plugins/frontend_emitter/__init__.py +25 -0
  204. code_puppy/plugins/frontend_emitter/emitter.py +121 -0
  205. code_puppy/plugins/frontend_emitter/register_callbacks.py +261 -0
  206. code_puppy/plugins/hook_creator/__init__.py +1 -0
  207. code_puppy/plugins/hook_creator/register_callbacks.py +33 -0
  208. code_puppy/plugins/hook_manager/__init__.py +1 -0
  209. code_puppy/plugins/hook_manager/config.py +277 -0
  210. code_puppy/plugins/hook_manager/hooks_menu.py +551 -0
  211. code_puppy/plugins/hook_manager/register_callbacks.py +205 -0
  212. code_puppy/plugins/oauth_puppy_html.py +224 -0
  213. code_puppy/plugins/scheduler/__init__.py +1 -0
  214. code_puppy/plugins/scheduler/register_callbacks.py +88 -0
  215. code_puppy/plugins/scheduler/scheduler_menu.py +522 -0
  216. code_puppy/plugins/scheduler/scheduler_wizard.py +341 -0
  217. code_puppy/plugins/shell_safety/__init__.py +6 -0
  218. code_puppy/plugins/shell_safety/agent_shell_safety.py +69 -0
  219. code_puppy/plugins/shell_safety/command_cache.py +156 -0
  220. code_puppy/plugins/shell_safety/register_callbacks.py +202 -0
  221. code_puppy/plugins/synthetic_status/__init__.py +1 -0
  222. code_puppy/plugins/synthetic_status/register_callbacks.py +132 -0
  223. code_puppy/plugins/synthetic_status/status_api.py +147 -0
  224. code_puppy/plugins/universal_constructor/__init__.py +13 -0
  225. code_puppy/plugins/universal_constructor/models.py +138 -0
  226. code_puppy/plugins/universal_constructor/register_callbacks.py +47 -0
  227. code_puppy/plugins/universal_constructor/registry.py +302 -0
  228. code_puppy/plugins/universal_constructor/sandbox.py +584 -0
  229. code_puppy/prompts/antigravity_system_prompt.md +1 -0
  230. code_puppy/pydantic_patches.py +317 -0
  231. code_puppy/reopenable_async_client.py +232 -0
  232. code_puppy/round_robin_model.py +150 -0
  233. code_puppy/scheduler/__init__.py +41 -0
  234. code_puppy/scheduler/__main__.py +9 -0
  235. code_puppy/scheduler/cli.py +118 -0
  236. code_puppy/scheduler/config.py +126 -0
  237. code_puppy/scheduler/daemon.py +280 -0
  238. code_puppy/scheduler/executor.py +155 -0
  239. code_puppy/scheduler/platform.py +19 -0
  240. code_puppy/scheduler/platform_unix.py +22 -0
  241. code_puppy/scheduler/platform_win.py +32 -0
  242. code_puppy/session_storage.py +338 -0
  243. code_puppy/status_display.py +257 -0
  244. code_puppy/summarization_agent.py +176 -0
  245. code_puppy/terminal_utils.py +418 -0
  246. code_puppy/tools/__init__.py +470 -0
  247. code_puppy/tools/agent_tools.py +616 -0
  248. code_puppy/tools/ask_user_question/__init__.py +26 -0
  249. code_puppy/tools/ask_user_question/constants.py +73 -0
  250. code_puppy/tools/ask_user_question/demo_tui.py +55 -0
  251. code_puppy/tools/ask_user_question/handler.py +232 -0
  252. code_puppy/tools/ask_user_question/models.py +304 -0
  253. code_puppy/tools/ask_user_question/registration.py +36 -0
  254. code_puppy/tools/ask_user_question/renderers.py +309 -0
  255. code_puppy/tools/ask_user_question/terminal_ui.py +329 -0
  256. code_puppy/tools/ask_user_question/theme.py +155 -0
  257. code_puppy/tools/ask_user_question/tui_loop.py +423 -0
  258. code_puppy/tools/browser/__init__.py +37 -0
  259. code_puppy/tools/browser/browser_control.py +289 -0
  260. code_puppy/tools/browser/browser_interactions.py +545 -0
  261. code_puppy/tools/browser/browser_locators.py +640 -0
  262. code_puppy/tools/browser/browser_manager.py +378 -0
  263. code_puppy/tools/browser/browser_navigation.py +251 -0
  264. code_puppy/tools/browser/browser_screenshot.py +179 -0
  265. code_puppy/tools/browser/browser_scripts.py +462 -0
  266. code_puppy/tools/browser/browser_workflows.py +221 -0
  267. code_puppy/tools/browser/chromium_terminal_manager.py +259 -0
  268. code_puppy/tools/browser/terminal_command_tools.py +534 -0
  269. code_puppy/tools/browser/terminal_screenshot_tools.py +552 -0
  270. code_puppy/tools/browser/terminal_tools.py +525 -0
  271. code_puppy/tools/command_runner.py +1346 -0
  272. code_puppy/tools/common.py +1409 -0
  273. code_puppy/tools/display.py +84 -0
  274. code_puppy/tools/file_modifications.py +739 -0
  275. code_puppy/tools/file_operations.py +802 -0
  276. code_puppy/tools/scheduler_tools.py +412 -0
  277. code_puppy/tools/skills_tools.py +251 -0
  278. code_puppy/tools/subagent_context.py +158 -0
  279. code_puppy/tools/tools_content.py +51 -0
  280. code_puppy/tools/universal_constructor.py +889 -0
  281. code_puppy/uvx_detection.py +242 -0
  282. code_puppy/version_checker.py +82 -0
  283. newcode-0.1.1.data/data/code_puppy/models.json +130 -0
  284. newcode-0.1.1.data/data/code_puppy/models_dev_api.json +1 -0
  285. newcode-0.1.1.dist-info/METADATA +154 -0
  286. newcode-0.1.1.dist-info/RECORD +289 -0
  287. newcode-0.1.1.dist-info/WHEEL +4 -0
  288. newcode-0.1.1.dist-info/entry_points.txt +3 -0
  289. newcode-0.1.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,2145 @@
1
+ """Base agent configuration class for defining agent properties."""
2
+
3
+ import asyncio
4
+ import dataclasses
5
+ import json
6
+ import math
7
+ import pathlib
8
+ import signal
9
+ import threading
10
+ import time
11
+ import traceback
12
+ import uuid
13
+ from abc import ABC, abstractmethod
14
+ from typing import (
15
+ Any,
16
+ Callable,
17
+ Dict,
18
+ List,
19
+ Optional,
20
+ Sequence,
21
+ Set,
22
+ Tuple,
23
+ Type,
24
+ Union,
25
+ )
26
+
27
+ import mcp
28
+ import pydantic
29
+ import pydantic_ai.models
30
+ from dbos import DBOS, SetWorkflowID
31
+ from pydantic_ai import Agent as PydanticAgent
32
+ from pydantic_ai import (
33
+ BinaryContent,
34
+ DocumentUrl,
35
+ ImageUrl,
36
+ RunContext,
37
+ UsageLimitExceeded,
38
+ UsageLimits,
39
+ )
40
+ from pydantic_ai.durable_exec.dbos import DBOSAgent
41
+ from pydantic_ai.messages import (
42
+ ModelMessage,
43
+ ModelRequest,
44
+ ModelResponse,
45
+ TextPart,
46
+ ThinkingPart,
47
+ ToolCallPart,
48
+ ToolCallPartDelta,
49
+ ToolReturn,
50
+ ToolReturnPart,
51
+ )
52
+ from rich.text import Text
53
+
54
+ from code_puppy.agents.event_stream_handler import event_stream_handler
55
+ from code_puppy.callbacks import (
56
+ on_agent_run_end,
57
+ on_agent_run_start,
58
+ on_message_history_processor_end,
59
+ on_message_history_processor_start,
60
+ )
61
+
62
+ # Consolidated relative imports
63
+ from code_puppy.config import (
64
+ get_agent_pinned_model,
65
+ get_compaction_strategy,
66
+ get_compaction_threshold,
67
+ get_global_model_name,
68
+ get_message_limit,
69
+ get_protected_token_count,
70
+ get_use_dbos,
71
+ get_value,
72
+ )
73
+ from code_puppy.error_logging import log_error
74
+ from code_puppy.keymap import cancel_agent_uses_signal, get_cancel_agent_char_code
75
+ from code_puppy.mcp_ import get_mcp_manager
76
+ from code_puppy.messaging import (
77
+ emit_error,
78
+ emit_info,
79
+ emit_warning,
80
+ )
81
+ from code_puppy.messaging.spinner import (
82
+ SpinnerBase,
83
+ update_spinner_context,
84
+ )
85
+ from code_puppy.model_factory import ModelFactory, make_model_settings
86
+ from code_puppy.summarization_agent import run_summarization_sync, SummarizationError
87
+ from code_puppy.tools.agent_tools import _active_subagent_tasks
88
+ from code_puppy.tools.command_runner import (
89
+ is_awaiting_user_input,
90
+ )
91
+
92
# Global flag to track delayed compaction requests
_delayed_compaction_requested = False

# NOTE(review): presumably counts agent reloads in this process — confirm at use sites.
_reload_count = 0
97
+
98
+ def _log_error_to_file(exc: Exception) -> Optional[str]:
99
+ """Log detailed error information to ~/.code_puppy/error_logs/log_{timestamp}.txt.
100
+
101
+ Args:
102
+ exc: The exception to log.
103
+
104
+ Returns:
105
+ The path to the log file if successful, None otherwise.
106
+ """
107
+ try:
108
+ from code_puppy.error_logging import get_logs_dir
109
+
110
+ error_logs_dir = pathlib.Path(get_logs_dir())
111
+ error_logs_dir.mkdir(parents=True, exist_ok=True)
112
+
113
+ timestamp = time.strftime("%Y%m%d_%H%M%S")
114
+ log_file = error_logs_dir / f"log_{timestamp}.txt"
115
+
116
+ with open(log_file, "w", encoding="utf-8") as f:
117
+ f.write(f"Timestamp: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")
118
+ f.write(f"Exception Type: {type(exc).__name__}\n")
119
+ f.write(f"Exception Message: {str(exc)}\n")
120
+ f.write(f"Exception Args: {exc.args}\n")
121
+ f.write("\n--- Full Traceback ---\n")
122
+ f.write(traceback.format_exc())
123
+ f.write("\n--- Exception Chain ---\n")
124
+ # Walk the exception chain for chained exceptions
125
+ current = exc
126
+ chain_depth = 0
127
+ while current is not None and chain_depth < 10:
128
+ f.write(
129
+ f"\n[Cause {chain_depth}] {type(current).__name__}: {current}\n"
130
+ )
131
+ f.write("".join(traceback.format_tb(current.__traceback__)))
132
+ current = (
133
+ current.__cause__ if current.__cause__ else current.__context__
134
+ )
135
+ chain_depth += 1
136
+
137
+ return str(log_file)
138
+ except Exception:
139
+ # Don't let logging errors break the main flow
140
+ return None
141
+
142
+
143
class BaseAgent(ABC):
    """Base class for all agent configurations."""

    def __init__(self):
        # Unique per-instance identifier; combined with `name` in get_identity().
        self.id = str(uuid.uuid4())
        # Conversation history for this agent instance.
        self._message_history: List[Any] = []
        # Hashes of messages already folded into a summary, so compaction
        # does not re-summarize them.
        self._compacted_message_hashes: Set[str] = set()
        # Agent construction cache
        self._code_generation_agent = None
        self._last_model_name: Optional[str] = None
        # Puppy rules loaded lazily
        self._puppy_rules: Optional[str] = None
        # Set later to the concrete model in use (declaration only here).
        self.cur_model: pydantic_ai.models.Model
        # Cache for MCP tool definitions (for token estimation)
        # This is populated after the first successful run when MCP tools are retrieved
        self._mcp_tool_definitions_cache: List[Dict[str, Any]] = []
159
+
160
+ def get_identity(self) -> str:
161
+ """Get a unique identity for this agent instance.
162
+
163
+ Returns:
164
+ A string like 'python-programmer-a3f2b1' combining name + short UUID.
165
+ """
166
+ return f"{self.name}-{self.id[:6]}"
167
+
168
+ def get_identity_prompt(self) -> str:
169
+ """Get the identity prompt suffix to embed in system prompts.
170
+
171
+ Returns:
172
+ A string instructing the agent about its identity for task ownership.
173
+ """
174
+ return (
175
+ f"\n\nYour ID is `{self.get_identity()}`. "
176
+ "Use this for any tasks which require identifying yourself "
177
+ "such as claiming task ownership or coordination with other agents."
178
+ )
179
+
180
+ def get_full_system_prompt(self) -> str:
181
+ """Get the complete system prompt with identity automatically appended.
182
+
183
+ This wraps get_system_prompt() and appends the agent's identity,
184
+ so subclasses don't need to worry about it.
185
+
186
+ Returns:
187
+ The full system prompt including identity information.
188
+ """
189
+ return self.get_system_prompt() + self.get_identity_prompt()
190
+
191
    # --- Abstract interface: every concrete agent must provide these. ---

    @property
    @abstractmethod
    def name(self) -> str:
        """Unique identifier for the agent."""
        pass

    @property
    @abstractmethod
    def display_name(self) -> str:
        """Human-readable name for the agent."""
        pass

    @property
    @abstractmethod
    def description(self) -> str:
        """Brief description of what this agent does."""
        pass

    @abstractmethod
    def get_system_prompt(self) -> str:
        """Get the system prompt for this agent."""
        pass

    @abstractmethod
    def get_available_tools(self) -> List[str]:
        """Get list of tool names that this agent should have access to.

        Returns:
            List of tool names to register for this agent.
        """
        pass
222
+
223
    def get_tools_config(self) -> Optional[Dict[str, Any]]:
        """Get tool configuration for this agent.

        Subclasses override this when they need a non-default configuration.

        Returns:
            Dict with tool configuration, or None to use default tools.
        """
        return None

    def get_user_prompt(self) -> Optional[str]:
        """Get custom user prompt for this agent.

        Returns:
            Custom prompt string, or None to use default.
        """
        return None
238
+
239
    # Message history management methods
    def get_message_history(self) -> List[Any]:
        """Get the message history for this agent.

        Returns:
            List of messages in this agent's conversation history.
        """
        return self._message_history

    def set_message_history(self, history: List[Any]) -> None:
        """Set the message history for this agent.

        Args:
            history: List of messages to set as the conversation history.
        """
        self._message_history = history

    def clear_message_history(self) -> None:
        """Clear the message history for this agent.

        Also forgets which messages were compacted, so a fresh conversation
        starts with a clean compaction state.
        """
        self._message_history = []
        self._compacted_message_hashes.clear()

    def append_to_message_history(self, message: Any) -> None:
        """Append a message to this agent's history.

        Args:
            message: Message to append to the conversation history.
        """
        self._message_history.append(message)

    def extend_message_history(self, history: List[Any]) -> None:
        """Extend this agent's message history with multiple messages.

        Args:
            history: List of messages to append to the conversation history.
        """
        self._message_history.extend(history)
276
+
277
    def get_compacted_message_hashes(self) -> Set[str]:
        """Get the set of compacted message hashes for this agent.

        Returns:
            Set of hashes for messages that have been compacted/summarized.
        """
        return self._compacted_message_hashes

    def add_compacted_message_hash(self, message_hash: str) -> None:
        """Add a message hash to the set of compacted message hashes.

        Args:
            message_hash: Hash of a message that has been compacted/summarized.
        """
        self._compacted_message_hashes.add(message_hash)
292
+
293
+ def get_model_name(self) -> Optional[str]:
294
+ """Get pinned model name for this agent, if specified.
295
+
296
+ Returns:
297
+ Model name to use for this agent, or global default if none pinned.
298
+ """
299
+ pinned = get_agent_pinned_model(self.name)
300
+ if pinned == "" or pinned is None:
301
+ return get_global_model_name()
302
+ return pinned
303
+
304
+ def _clean_binaries(self, messages: List[ModelMessage]) -> List[ModelMessage]:
305
+ """Remove BinaryContent items from message parts.
306
+
307
+ Note: This mutates the messages in-place by modifying part.content.
308
+ The return value is the same list for API consistency.
309
+ """
310
+ for message in messages:
311
+ for part in message.parts:
312
+ if hasattr(part, "content") and isinstance(part.content, list):
313
+ part.content = [
314
+ item
315
+ for item in part.content
316
+ if not isinstance(item, BinaryContent)
317
+ ]
318
+ return messages
319
+
320
+ def ensure_history_ends_with_request(
321
+ self, messages: List[ModelMessage]
322
+ ) -> List[ModelMessage]:
323
+ """Ensure message history ends with a ModelRequest.
324
+
325
+ pydantic_ai requires that processed message history ends with a ModelRequest.
326
+ This can fail when swapping models mid-conversation if the history ends with
327
+ a ModelResponse from the previous model.
328
+
329
+ This method trims trailing ModelResponse messages to ensure compatibility.
330
+
331
+ Args:
332
+ messages: List of messages to validate/fix.
333
+
334
+ Returns:
335
+ List of messages guaranteed to end with ModelRequest, or empty list
336
+ if no ModelRequest is found.
337
+ """
338
+ messages = list(messages) # defensive copy
339
+ if not messages:
340
+ return messages
341
+
342
+ # Trim trailing ModelResponse messages
343
+ while messages and isinstance(messages[-1], ModelResponse):
344
+ messages = messages[:-1]
345
+
346
+ return messages
347
+
348
+ # Message history processing methods (moved from state_management.py and message_history_processor.py)
349
+ def _stringify_part(self, part: Any) -> str:
350
+ """Create a stable string representation for a message part.
351
+
352
+ We deliberately ignore timestamps so identical content hashes the same even when
353
+ emitted at different times. This prevents status updates from blowing up the
354
+ history when they are repeated with new timestamps."""
355
+
356
+ attributes: List[str] = [part.__class__.__name__]
357
+
358
+ # Role/instructions help disambiguate parts that otherwise share content
359
+ if hasattr(part, "role") and part.role:
360
+ attributes.append(f"role={part.role}")
361
+ if hasattr(part, "instructions") and part.instructions:
362
+ attributes.append(f"instructions={part.instructions}")
363
+
364
+ if hasattr(part, "tool_call_id") and part.tool_call_id:
365
+ attributes.append(f"tool_call_id={part.tool_call_id}")
366
+
367
+ if hasattr(part, "tool_name") and part.tool_name:
368
+ attributes.append(f"tool_name={part.tool_name}")
369
+
370
+ content = getattr(part, "content", None)
371
+ if content is None:
372
+ attributes.append("content=None")
373
+ elif isinstance(content, str):
374
+ attributes.append(f"content={content}")
375
+ elif isinstance(content, pydantic.BaseModel):
376
+ attributes.append(
377
+ f"content={json.dumps(content.model_dump(), sort_keys=True)}"
378
+ )
379
+ elif isinstance(content, dict):
380
+ attributes.append(f"content={json.dumps(content, sort_keys=True)}")
381
+ elif isinstance(content, list):
382
+ for item in content:
383
+ if isinstance(item, str):
384
+ attributes.append(f"content={item}")
385
+ if isinstance(item, BinaryContent):
386
+ attributes.append(f"BinaryContent={hash(item.data)}")
387
+ else:
388
+ attributes.append(f"content={repr(content)}")
389
+ result = "|".join(attributes)
390
+ return result
391
+
392
+ def hash_message(self, message: Any) -> int:
393
+ """Create a stable hash for a model message that ignores timestamps."""
394
+ role = getattr(message, "role", None)
395
+ instructions = getattr(message, "instructions", None)
396
+ header_bits: List[str] = []
397
+ if role:
398
+ header_bits.append(f"role={role}")
399
+ if instructions:
400
+ header_bits.append(f"instructions={instructions}")
401
+
402
+ part_strings = [
403
+ self._stringify_part(part) for part in getattr(message, "parts", [])
404
+ ]
405
+ canonical = "||".join(header_bits + part_strings)
406
+ return hash(canonical)
407
+
408
    def stringify_message_part(self, part) -> str:
        """
        Convert a message part to a string representation for token estimation or other uses.

        Args:
            part: A message part that may contain content or be a tool call

        Returns:
            String representation of the message part
        """
        result = ""
        if hasattr(part, "part_kind"):
            result += part.part_kind + ": "
        else:
            result += str(type(part)) + ": "

        # Handle content
        # NOTE(review): when the part HAS content, `result` is reassigned
        # below, discarding the "kind: " prefix built above — the prefix
        # survives only for parts without content. Looks intentional for
        # token counting, but worth confirming.
        if hasattr(part, "content") and part.content:
            # Handle different content types
            if isinstance(part.content, str):
                result = part.content
            elif isinstance(part.content, pydantic.BaseModel):
                result = json.dumps(part.content.model_dump())
            elif isinstance(part.content, dict):
                result = json.dumps(part.content)
            elif isinstance(part.content, list):
                result = ""
                for item in part.content:
                    if isinstance(item, str):
                        result += item + "\n"
                    if isinstance(item, BinaryContent):
                        # Binary payloads contribute a short placeholder
                        # instead of their raw bytes.
                        result += f"BinaryContent={hash(item.data)}\n"
            else:
                result = str(part.content)

        # Handle tool calls which may have additional token costs
        # If part also has content, we'll process tool calls separately
        if hasattr(part, "tool_name") and part.tool_name:
            # Estimate tokens for tool name and parameters
            tool_text = part.tool_name
            if hasattr(part, "args"):
                tool_text += f" {str(part.args)}"
            result += tool_text

        return result
453
+
454
+ def estimate_token_count(self, text: str) -> int:
455
+ """
456
+ Simple token estimation using len(message) / 2.5.
457
+ This replaces tiktoken with a much simpler approach.
458
+ """
459
+ return max(1, math.floor((len(text) / 2.5)))
460
+
461
+ def estimate_tokens_for_message(self, message: ModelMessage) -> int:
462
+ """
463
+ Estimate the number of tokens in a message using len(message)
464
+ Simple and fast replacement for tiktoken.
465
+ """
466
+ total_tokens = 0
467
+
468
+ for part in message.parts:
469
+ part_str = self.stringify_message_part(part)
470
+ if part_str:
471
+ total_tokens += self.estimate_token_count(part_str)
472
+
473
+ return max(1, total_tokens)
474
+
475
    def estimate_context_overhead_tokens(self) -> int:
        """
        Estimate the token overhead from system prompt and tool definitions.

        This accounts for tokens that are always present in the context:
        - System prompt (for non-Claude-Code models)
        - Tool definitions (name, description, parameter schema)
        - MCP tool definitions

        Note: For Claude Code models, the system prompt is prepended to the first
        user message, so it's already counted in the message history tokens.
        We only count the short fixed instructions for Claude Code models.

        Returns:
            Estimated overhead token count; each section that cannot be
            inspected is silently skipped, so the result is a lower bound.
        """
        total_tokens = 0

        # 1. Estimate tokens for system prompt / instructions
        # Use prepare_prompt_for_model() to get the correct instructions for token counting.
        # For models that prepend system prompt to user message (claude-code, antigravity),
        # this returns the short fixed instructions. For other models, returns full prompt.
        try:
            from code_puppy.model_utils import prepare_prompt_for_model

            # hasattr-guarded: some lightweight agents may not expose a model name.
            model_name = (
                self.get_model_name() if hasattr(self, "get_model_name") else ""
            )
            system_prompt = self.get_full_system_prompt()

            # Get the instructions that will be used (handles model-specific logic via hooks)
            prepared = prepare_prompt_for_model(
                model_name=model_name,
                system_prompt=system_prompt,
                user_prompt="",  # Empty - we just need the instructions
                prepend_system_to_user=False,  # Don't modify prompt, just get instructions
            )

            if prepared.instructions:
                total_tokens += self.estimate_token_count(prepared.instructions)
        except Exception:
            pass  # If we can't get system prompt, skip it

        # 2. Estimate tokens for pydantic_agent tool definitions
        # NOTE(review): reads the private `_tools` mapping of the pydantic-ai
        # Agent; if that internal changes upstream, this section yields 0.
        pydantic_agent = getattr(self, "pydantic_agent", None)
        if pydantic_agent:
            tools = getattr(pydantic_agent, "_tools", None)
            if tools and isinstance(tools, dict):
                for tool_name, tool_func in tools.items():
                    try:
                        # Estimate tokens from tool name
                        total_tokens += self.estimate_token_count(tool_name)

                        # Estimate tokens from tool description
                        description = getattr(tool_func, "__doc__", None) or ""
                        if description:
                            total_tokens += self.estimate_token_count(description)

                        # Estimate tokens from parameter schema
                        # Tools may have a schema attribute or we can try to get it from annotations
                        schema = getattr(tool_func, "schema", None)
                        if schema:
                            schema_str = (
                                json.dumps(schema)
                                if isinstance(schema, dict)
                                else str(schema)
                            )
                            total_tokens += self.estimate_token_count(schema_str)
                        else:
                            # Try to get schema from function annotations
                            annotations = getattr(tool_func, "__annotations__", None)
                            if annotations:
                                total_tokens += self.estimate_token_count(
                                    str(annotations)
                                )
                    except Exception:
                        continue  # Skip tools we can't process

        # 3. Estimate tokens for MCP tool definitions from cache
        # MCP tools are fetched asynchronously, so we use a cache that's populated
        # after the first successful run. See _update_mcp_tool_cache() method.
        mcp_tool_cache = getattr(self, "_mcp_tool_definitions_cache", [])
        if mcp_tool_cache:
            for tool_def in mcp_tool_cache:
                try:
                    # Estimate tokens from tool name
                    tool_name = tool_def.get("name", "")
                    if tool_name:
                        total_tokens += self.estimate_token_count(tool_name)

                    # Estimate tokens from tool description
                    description = tool_def.get("description", "")
                    if description:
                        total_tokens += self.estimate_token_count(description)

                    # Estimate tokens from parameter schema (inputSchema)
                    input_schema = tool_def.get("inputSchema")
                    if input_schema:
                        schema_str = (
                            json.dumps(input_schema)
                            if isinstance(input_schema, dict)
                            else str(input_schema)
                        )
                        total_tokens += self.estimate_token_count(schema_str)
                except Exception:
                    continue  # Skip tools we can't process

        return total_tokens
580
+
581
+ async def _update_mcp_tool_cache(self) -> None:
582
+ """
583
+ Update the MCP tool definitions cache by fetching tools from running MCP servers.
584
+
585
+ This should be called after a successful run to populate the cache for
586
+ accurate token estimation in subsequent runs.
587
+ """
588
+ mcp_servers = getattr(self, "_mcp_servers", None)
589
+ if not mcp_servers:
590
+ return
591
+
592
+ tool_definitions = []
593
+ for mcp_server in mcp_servers:
594
+ try:
595
+ # Check if the server has list_tools method (pydantic-ai MCP servers)
596
+ if hasattr(mcp_server, "list_tools"):
597
+ # list_tools() returns list[mcp_types.Tool]
598
+ tools = await mcp_server.list_tools()
599
+ for tool in tools:
600
+ tool_def = {
601
+ "name": getattr(tool, "name", ""),
602
+ "description": getattr(tool, "description", ""),
603
+ "inputSchema": getattr(tool, "inputSchema", {}),
604
+ }
605
+ tool_definitions.append(tool_def)
606
+ except Exception:
607
+ # Server might not be running or accessible, skip it
608
+ continue
609
+
610
+ self._mcp_tool_definitions_cache = tool_definitions
611
+
612
+ def update_mcp_tool_cache_sync(self) -> None:
613
+ """
614
+ Synchronously clear the MCP tool cache.
615
+
616
+ This clears the cache so that token counts will be recalculated on the next
617
+ agent run. Call this after starting/stopping MCP servers.
618
+
619
+ Note: We don't try to fetch tools synchronously because MCP servers require
620
+ async context management that doesn't work well from sync code. The cache
621
+ will be repopulated on the next successful agent run.
622
+ """
623
+ # Simply clear the cache - it will be repopulated on the next agent run
624
+ # This is safer than trying to call async methods from sync context
625
+ self._mcp_tool_definitions_cache = []
626
+
627
+ def _is_tool_call_part(self, part: Any) -> bool:
628
+ if isinstance(part, (ToolCallPart, ToolCallPartDelta)):
629
+ return True
630
+
631
+ part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
632
+ if part_kind == "tool-call":
633
+ return True
634
+
635
+ has_tool_name = getattr(part, "tool_name", None) is not None
636
+ has_args = getattr(part, "args", None) is not None
637
+ has_args_delta = getattr(part, "args_delta", None) is not None
638
+
639
+ return bool(has_tool_name and (has_args or has_args_delta))
640
+
641
+ def _is_tool_return_part(self, part: Any) -> bool:
642
+ if isinstance(part, (ToolReturnPart, ToolReturn)):
643
+ return True
644
+
645
+ part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
646
+ if part_kind in {"tool-return", "tool-result"}:
647
+ return True
648
+
649
+ if getattr(part, "tool_call_id", None) is None:
650
+ return False
651
+
652
+ has_content = getattr(part, "content", None) is not None
653
+ has_content_delta = getattr(part, "content_delta", None) is not None
654
+ return bool(has_content or has_content_delta)
655
+
656
+ def filter_huge_messages(self, messages: List[ModelMessage]) -> List[ModelMessage]:
657
+ filtered = [m for m in messages if self.estimate_tokens_for_message(m) < 50000]
658
+ pruned = self.prune_interrupted_tool_calls(filtered)
659
+ return pruned
660
+
661
    def _find_safe_split_index(
        self, messages: List[ModelMessage], initial_split_idx: int
    ) -> int:
        """
        Adjust split index to avoid breaking tool_use/tool_result pairs.

        Ensures that if a tool_result is in the protected zone, its corresponding
        tool_use is also included. Otherwise the LLM will error with
        'tool_use ids found without tool_result blocks'.

        Args:
            messages: Full message list
            initial_split_idx: The initial split point (messages before this go to summarize)

        Returns:
            Adjusted split index that doesn't break tool pairs (always <= the
            initial index; the split only ever moves backwards).
        """
        # Index 0 is the system message; a split at or before index 1 cannot
        # separate a tool pair, so there is nothing to adjust.
        if initial_split_idx <= 1:
            return initial_split_idx

        # Collect tool_call_ids from messages AFTER the split (protected zone)
        protected_tool_return_ids: Set[str] = set()
        for msg in messages[initial_split_idx:]:
            for part in getattr(msg, "parts", []) or []:
                if getattr(part, "part_kind", None) == "tool-return":
                    tool_call_id = getattr(part, "tool_call_id", None)
                    if tool_call_id:
                        protected_tool_return_ids.add(tool_call_id)

        # No tool returns after the split means no pair can be broken.
        if not protected_tool_return_ids:
            return initial_split_idx

        # Scan backwards from split point to find any tool_uses that match protected returns
        adjusted_idx = initial_split_idx
        for i in range(
            initial_split_idx - 1, 0, -1
        ):  # Don't include system message at 0
            msg = messages[i]
            has_matching_tool_use = False
            for part in getattr(msg, "parts", []) or []:
                if getattr(part, "part_kind", None) == "tool-call":
                    tool_call_id = getattr(part, "tool_call_id", None)
                    if tool_call_id and tool_call_id in protected_tool_return_ids:
                        has_matching_tool_use = True
                        break

            if has_matching_tool_use:
                # This message has a tool_use whose return is in protected zone
                # Move the split point back to include this message in protected zone
                adjusted_idx = i
            else:
                # Once we find a message without matching tool_use, we can stop
                # (tool calls and returns should be adjacent)
                break

        return adjusted_idx
717
+
718
+ def split_messages_for_protected_summarization(
719
+ self,
720
+ messages: List[ModelMessage],
721
+ ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
722
+ """
723
+ Split messages into two groups: messages to summarize and protected recent messages.
724
+
725
+ Returns:
726
+ Tuple of (messages_to_summarize, protected_messages)
727
+
728
+ The protected_messages are the most recent messages that total up to the configured protected token count.
729
+ The system message (first message) is always protected.
730
+ All other messages that don't fit in the protected zone will be summarized.
731
+ """
732
+ if len(messages) <= 1: # Just system message or empty
733
+ return [], messages
734
+
735
+ # Always protect the system message (first message)
736
+ system_message = messages[0]
737
+ system_tokens = self.estimate_tokens_for_message(system_message)
738
+
739
+ if len(messages) == 1:
740
+ return [], messages
741
+
742
+ # Get the configured protected token count
743
+ protected_tokens_limit = get_protected_token_count()
744
+
745
+ # Calculate tokens for messages from most recent backwards (excluding system message)
746
+ protected_messages = []
747
+ protected_token_count = system_tokens # Start with system message tokens
748
+
749
+ # Go backwards through non-system messages to find protected zone
750
+ for i in range(
751
+ len(messages) - 1, 0, -1
752
+ ): # Stop at 1, not 0 (skip system message)
753
+ message = messages[i]
754
+ message_tokens = self.estimate_tokens_for_message(message)
755
+
756
+ # If adding this message would exceed protected tokens, stop here
757
+ if protected_token_count + message_tokens > protected_tokens_limit:
758
+ break
759
+
760
+ protected_messages.append(message)
761
+ protected_token_count += message_tokens
762
+
763
+ # Messages that were added while scanning backwards are currently in reverse order.
764
+ # Reverse them to restore chronological ordering, then prepend the system prompt.
765
+ protected_messages.reverse()
766
+ protected_messages.insert(0, system_message)
767
+
768
+ # Messages to summarize are everything between the system message and the
769
+ # protected tail zone we just constructed.
770
+ protected_start_idx = max(1, len(messages) - (len(protected_messages) - 1))
771
+
772
+ # IMPORTANT: Adjust split point to avoid breaking tool_use/tool_result pairs
773
+ # The LLM requires every tool_use to have its tool_result immediately after
774
+ protected_start_idx = self._find_safe_split_index(messages, protected_start_idx)
775
+
776
+ messages_to_summarize = messages[1:protected_start_idx]
777
+
778
+ # Emit info messages
779
+ emit_info(
780
+ f"🔒 Protecting {len(protected_messages)} recent messages ({protected_token_count} tokens, limit: {protected_tokens_limit})"
781
+ )
782
+ emit_info(f"📝 Summarizing {len(messages_to_summarize)} older messages")
783
+
784
+ return messages_to_summarize, protected_messages
785
+
786
    def summarize_messages(
        self, messages: List[ModelMessage], with_protection: bool = True
    ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
        """
        Summarize messages while protecting recent messages up to PROTECTED_TOKENS.

        Args:
            messages: Full message history; index 0 is treated as the system
                message and is always preserved.
            with_protection: When True, recent messages within the protected
                token budget are kept verbatim; when False, everything after
                the system message is summarized.

        Returns:
            Tuple of (compacted_messages, summarized_source_messages)
            where compacted_messages always preserves the original system message
            as the first entry. On any summarization failure the original
            messages are returned unchanged with an empty second element.
        """
        messages_to_summarize: List[ModelMessage]
        protected_messages: List[ModelMessage]

        if with_protection:
            messages_to_summarize, protected_messages = (
                self.split_messages_for_protected_summarization(messages)
            )
        else:
            messages_to_summarize = messages[1:] if messages else []
            protected_messages = messages[:1]

        if not messages:
            return [], []

        system_message = messages[0]

        if not messages_to_summarize:
            # Nothing to summarize, so just return the original sequence
            return self.prune_interrupted_tool_calls(messages), []

        instructions = (
            "The input will be a log of Agentic AI steps that have been taken"
            " as well as user queries, etc. Summarize the contents of these steps."
            " The high level details should remain but the bulk of the content from tool-call"
            " responses should be compacted and summarized. For example if you see a tool-call"
            " reading a file, and the file contents are large, then in your summary you might just"
            " write: * used read_file on space_invaders.cpp - contents removed."
            "\n Make sure your result is a bulleted list of all steps and interactions."
            "\n\nNOTE: This summary represents older conversation history. Recent messages are preserved separately."
        )

        try:
            # Prune any orphaned tool calls from messages before sending to LLM
            # The LLM requires every tool_use to have a matching tool_result
            pruned_messages_to_summarize = self.prune_interrupted_tool_calls(
                messages_to_summarize
            )

            if not pruned_messages_to_summarize:
                # After pruning, nothing left to summarize
                return self.prune_interrupted_tool_calls(messages), []

            # NOTE: this calls the module-level run_summarization_sync helper,
            # not self.run_summarization_sync (the unqualified name resolves
            # to module scope inside a method body).
            new_messages = run_summarization_sync(
                instructions, message_history=pruned_messages_to_summarize
            )

            if not isinstance(new_messages, list):
                emit_warning(
                    "Summarization agent returned non-list output; wrapping into message request"
                )
                new_messages = [ModelRequest([TextPart(str(new_messages))])]

            compacted: List[ModelMessage] = [system_message] + list(new_messages)

            # Drop the system message from protected_messages because we already included it
            protected_tail = [
                msg for msg in protected_messages if msg is not system_message
            ]

            compacted.extend(protected_tail)

            return self.prune_interrupted_tool_calls(compacted), messages_to_summarize
        except SummarizationError as e:
            # SummarizationError has detailed error info
            emit_error(f"Summarization failed: {e}")
            if e.original_error:
                emit_warning(
                    f"💡 Tip: Underlying error was {type(e.original_error).__name__}. "
                    "Consider using '/set compaction_strategy=truncation' as a fallback."
                )
            return messages, []  # Return original messages on failure
        except Exception as e:
            # Catch-all for unexpected errors
            error_type = type(e).__name__
            error_msg = str(e) if str(e) else "(no error details)"
            emit_error(
                f"Unexpected error during compaction: [{error_type}] {error_msg}"
            )
            return messages, []  # Return original messages on failure
876
+
877
+ def get_model_context_length(self) -> int:
878
+ """
879
+ Return the context length for this agent's effective model.
880
+
881
+ Honors per-agent pinned model via `self.get_model_name()`; falls back
882
+ to global model when no pin is set. Defaults conservatively on failure.
883
+ """
884
+ try:
885
+ model_configs = ModelFactory.load_config()
886
+ # Use the agent's effective model (respects /pin_model)
887
+ model_name = self.get_model_name()
888
+ model_config = model_configs.get(model_name, {})
889
+ context_length = model_config.get("context_length", 128000)
890
+ return int(context_length)
891
+ except Exception:
892
+ # Be safe; don't blow up status/compaction if model lookup fails
893
+ return 128000
894
+
895
+ def has_pending_tool_calls(self, messages: List[ModelMessage]) -> bool:
896
+ """
897
+ Check if there are any pending tool calls in the message history.
898
+
899
+ A pending tool call is one that has a ToolCallPart without a corresponding
900
+ ToolReturnPart. This indicates the model is still waiting for tool execution.
901
+
902
+ Returns:
903
+ True if there are pending tool calls, False otherwise
904
+ """
905
+ if not messages:
906
+ return False
907
+
908
+ tool_call_ids: Set[str] = set()
909
+ tool_return_ids: Set[str] = set()
910
+
911
+ # Collect all tool call and return IDs
912
+ for msg in messages:
913
+ for part in getattr(msg, "parts", []) or []:
914
+ tool_call_id = getattr(part, "tool_call_id", None)
915
+ if not tool_call_id:
916
+ continue
917
+
918
+ if part.part_kind == "tool-call":
919
+ tool_call_ids.add(tool_call_id)
920
+ elif part.part_kind == "tool-return":
921
+ tool_return_ids.add(tool_call_id)
922
+
923
+ # Pending tool calls are those without corresponding returns
924
+ pending_calls = tool_call_ids - tool_return_ids
925
+ return len(pending_calls) > 0
926
+
927
    def request_delayed_compaction(self) -> None:
        """
        Request that compaction be attempted after the current tool calls complete.

        This sets a global flag that will be checked during the next message
        processing cycle to trigger compaction when it's safe to do so.
        """
        # NOTE(review): the flag is module-global, so a request raised by one
        # agent instance is observed process-wide, not per-agent — confirm
        # this is intentional.
        global _delayed_compaction_requested
        _delayed_compaction_requested = True
        emit_info(
            "🔄 Delayed compaction requested - will attempt after tool calls complete",
            message_group="token_context_status",
        )
940
+
941
+ def should_attempt_delayed_compaction(self) -> bool:
942
+ """
943
+ Check if delayed compaction was requested and it's now safe to proceed.
944
+
945
+ Returns:
946
+ True if delayed compaction was requested and no tool calls are pending
947
+ """
948
+ global _delayed_compaction_requested
949
+ if not _delayed_compaction_requested:
950
+ return False
951
+
952
+ # Check if it's now safe to compact
953
+ messages = self.get_message_history()
954
+ if not self.has_pending_tool_calls(messages):
955
+ _delayed_compaction_requested = False # Reset the flag
956
+ return True
957
+
958
+ return False
959
+
960
+ def get_pending_tool_call_count(self, messages: List[ModelMessage]) -> int:
961
+ """
962
+ Get the count of pending tool calls for debugging purposes.
963
+
964
+ Returns:
965
+ Number of tool calls waiting for execution
966
+ """
967
+ if not messages:
968
+ return 0
969
+
970
+ tool_call_ids: Set[str] = set()
971
+ tool_return_ids: Set[str] = set()
972
+
973
+ for msg in messages:
974
+ for part in getattr(msg, "parts", []) or []:
975
+ tool_call_id = getattr(part, "tool_call_id", None)
976
+ if not tool_call_id:
977
+ continue
978
+
979
+ if part.part_kind == "tool-call":
980
+ tool_call_ids.add(tool_call_id)
981
+ elif part.part_kind == "tool-return":
982
+ tool_return_ids.add(tool_call_id)
983
+
984
+ pending_calls = tool_call_ids - tool_return_ids
985
+ return len(pending_calls)
986
+
987
+ def prune_interrupted_tool_calls(
988
+ self, messages: List[ModelMessage]
989
+ ) -> List[ModelMessage]:
990
+ """
991
+ Remove any messages that participate in mismatched tool call sequences.
992
+
993
+ A mismatched tool call id is one that appears in a ToolCall (model/tool request)
994
+ without a corresponding tool return, or vice versa. We preserve original order
995
+ and only drop messages that contain parts referencing mismatched tool_call_ids.
996
+ """
997
+ if not messages:
998
+ return messages
999
+
1000
+ tool_call_ids: Set[str] = set()
1001
+ tool_return_ids: Set[str] = set()
1002
+
1003
+ # First pass: collect ids for calls vs returns
1004
+ for msg in messages:
1005
+ for part in getattr(msg, "parts", []) or []:
1006
+ tool_call_id = getattr(part, "tool_call_id", None)
1007
+ if not tool_call_id:
1008
+ continue
1009
+ # Heuristic: if it's an explicit ToolCallPart or has a tool_name/args,
1010
+ # consider it a call; otherwise it's a return/result.
1011
+ if part.part_kind == "tool-call":
1012
+ tool_call_ids.add(tool_call_id)
1013
+ else:
1014
+ tool_return_ids.add(tool_call_id)
1015
+
1016
+ mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
1017
+ if not mismatched:
1018
+ return messages
1019
+
1020
+ pruned: List[ModelMessage] = []
1021
+ dropped_count = 0
1022
+ for msg in messages:
1023
+ has_mismatched = False
1024
+ for part in getattr(msg, "parts", []) or []:
1025
+ tcid = getattr(part, "tool_call_id", None)
1026
+ if tcid and tcid in mismatched:
1027
+ has_mismatched = True
1028
+ break
1029
+ if has_mismatched:
1030
+ dropped_count += 1
1031
+ continue
1032
+ pruned.append(msg)
1033
+ return pruned
1034
+
1035
+ def message_history_processor(
1036
+ self, ctx: RunContext, messages: List[ModelMessage]
1037
+ ) -> List[ModelMessage]:
1038
+ # First, prune any interrupted/mismatched tool-call conversations
1039
+ model_max = self.get_model_context_length()
1040
+
1041
+ message_tokens = sum(self.estimate_tokens_for_message(msg) for msg in messages)
1042
+ context_overhead = self.estimate_context_overhead_tokens()
1043
+ total_current_tokens = message_tokens + context_overhead
1044
+ proportion_used = total_current_tokens / model_max
1045
+
1046
+ context_summary = SpinnerBase.format_context_info(
1047
+ total_current_tokens, model_max, proportion_used
1048
+ )
1049
+ update_spinner_context(context_summary)
1050
+
1051
+ # Get the configured compaction threshold
1052
+ compaction_threshold = get_compaction_threshold()
1053
+
1054
+ # Get the configured compaction strategy
1055
+ compaction_strategy = get_compaction_strategy()
1056
+
1057
+ if proportion_used > compaction_threshold:
1058
+ # RACE CONDITION PROTECTION: Check for pending tool calls before summarization
1059
+ if compaction_strategy == "summarization" and self.has_pending_tool_calls(
1060
+ messages
1061
+ ):
1062
+ pending_count = self.get_pending_tool_call_count(messages)
1063
+ emit_warning(
1064
+ f"⚠️ Summarization deferred: {pending_count} pending tool call(s) detected. "
1065
+ "Waiting for tool execution to complete before compaction.",
1066
+ message_group="token_context_status",
1067
+ )
1068
+ # Request delayed compaction for when tool calls complete
1069
+ self.request_delayed_compaction()
1070
+ # Return original messages without compaction
1071
+ return messages, []
1072
+
1073
+ if compaction_strategy == "truncation":
1074
+ # Use truncation instead of summarization
1075
+ protected_tokens = get_protected_token_count()
1076
+ result_messages = self.truncation(
1077
+ self.filter_huge_messages(messages), protected_tokens
1078
+ )
1079
+ summarized_messages = [] # No summarization in truncation mode
1080
+ else:
1081
+ # Default to summarization (safe to proceed - no pending tool calls)
1082
+ result_messages, summarized_messages = self.summarize_messages(
1083
+ self.filter_huge_messages(messages)
1084
+ )
1085
+
1086
+ final_token_count = sum(
1087
+ self.estimate_tokens_for_message(msg) for msg in result_messages
1088
+ )
1089
+ # Update spinner with final token count
1090
+ final_summary = SpinnerBase.format_context_info(
1091
+ final_token_count, model_max, final_token_count / model_max
1092
+ )
1093
+ update_spinner_context(final_summary)
1094
+
1095
+ self.set_message_history(result_messages)
1096
+ for m in summarized_messages:
1097
+ self.add_compacted_message_hash(self.hash_message(m))
1098
+ return result_messages
1099
+ return messages
1100
+
1101
+ def truncation(
1102
+ self, messages: List[ModelMessage], protected_tokens: int
1103
+ ) -> List[ModelMessage]:
1104
+ """
1105
+ Truncate message history to manage token usage.
1106
+
1107
+ Protects:
1108
+ - The first message (system prompt) - always kept
1109
+ - The second message if it contains a ThinkingPart (extended thinking context)
1110
+ - The most recent messages up to protected_tokens
1111
+
1112
+ Args:
1113
+ messages: List of messages to truncate
1114
+ protected_tokens: Number of tokens to protect
1115
+
1116
+ Returns:
1117
+ Truncated list of messages
1118
+ """
1119
+ import queue
1120
+
1121
+ emit_info("Truncating message history to manage token usage")
1122
+ result = [messages[0]] # Always keep the first message (system prompt)
1123
+
1124
+ # Check if second message exists and contains a ThinkingPart
1125
+ # If so, protect it (extended thinking context shouldn't be lost)
1126
+ skip_second = False
1127
+ if len(messages) > 1:
1128
+ second_msg = messages[1]
1129
+ has_thinking = any(
1130
+ isinstance(part, ThinkingPart) for part in second_msg.parts
1131
+ )
1132
+ if has_thinking:
1133
+ result.append(second_msg)
1134
+ skip_second = True
1135
+
1136
+ num_tokens = 0
1137
+ stack = queue.LifoQueue()
1138
+
1139
+ # Determine which messages to consider for the recent-tokens window
1140
+ # Skip first message (already added), and skip second if it has thinking
1141
+ start_idx = 2 if skip_second else 1
1142
+ messages_to_scan = messages[start_idx:]
1143
+
1144
+ # Put messages in reverse order (most recent first) into the stack
1145
+ # but break when we exceed protected_tokens
1146
+ for msg in reversed(messages_to_scan):
1147
+ num_tokens += self.estimate_tokens_for_message(msg)
1148
+ if num_tokens > protected_tokens:
1149
+ break
1150
+ stack.put(msg)
1151
+
1152
+ # Pop messages from stack to get them in chronological order
1153
+ while not stack.empty():
1154
+ result.append(stack.get())
1155
+
1156
+ result = self.prune_interrupted_tool_calls(result)
1157
+ return result
1158
+
1159
    def run_summarization_sync(
        self,
        instructions: str,
        message_history: List[ModelMessage],
    ) -> Union[List[ModelMessage], str]:
        """
        Run summarization synchronously using the configured summarization agent.
        This is exposed as a method so it can be overridden by subclasses if needed.

        Args:
            instructions: Instructions for the summarization agent
            message_history: List of messages to summarize

        Returns:
            Summarized messages or text
        """
        # Delegates to the module-level run_summarization_sync helper; the
        # unqualified name resolves to module scope, so this does not recurse.
        return run_summarization_sync(instructions, message_history)
1176
+
1177
+ # ===== Agent wiring formerly in code_puppy/agent.py =====
1178
+ def load_puppy_rules(self) -> Optional[str]:
1179
+ """Load AGENT(S).md from both global config and project directory.
1180
+
1181
+ Checks for AGENTS.md/AGENT.md/agents.md/agent.md in this order:
1182
+ 1. Global config directory (~/.code_puppy/ or XDG config)
1183
+ 2. Current working directory (project-specific)
1184
+
1185
+ If both exist, they are combined with global rules first, then project rules.
1186
+ This allows project-specific rules to override or extend global rules.
1187
+ """
1188
+ if self._puppy_rules is not None:
1189
+ return self._puppy_rules
1190
+ from pathlib import Path
1191
+
1192
+ possible_paths = ["AGENTS.md", "AGENT.md", "agents.md", "agent.md"]
1193
+
1194
+ # Load global rules from CONFIG_DIR
1195
+ global_rules = None
1196
+ from code_puppy.config import CONFIG_DIR
1197
+
1198
+ for path_str in possible_paths:
1199
+ global_path = Path(CONFIG_DIR) / path_str
1200
+ if global_path.exists():
1201
+ global_rules = global_path.read_text(encoding="utf-8-sig")
1202
+ break
1203
+
1204
+ # Load project-local rules from current working directory
1205
+ project_rules = None
1206
+ for path_str in possible_paths:
1207
+ project_path = Path(path_str)
1208
+ if project_path.exists():
1209
+ project_rules = project_path.read_text(encoding="utf-8-sig")
1210
+ break
1211
+
1212
+ # Combine global and project rules
1213
+ # Global rules come first, project rules second (allowing project to override)
1214
+ rules = [r for r in [global_rules, project_rules] if r]
1215
+ self._puppy_rules = "\n\n".join(rules) if rules else None
1216
+ return self._puppy_rules
1217
+
1218
+ def load_mcp_servers(self, extra_headers: Optional[Dict[str, str]] = None):
1219
+ """Load MCP servers through the manager and return pydantic-ai compatible servers.
1220
+
1221
+ Note: The manager automatically syncs from mcp_servers.json during initialization,
1222
+ so we don't need to sync here. Use reload_mcp_servers() to force a re-sync.
1223
+ """
1224
+
1225
+ mcp_disabled = get_value("disable_mcp_servers")
1226
+ if mcp_disabled and str(mcp_disabled).lower() in ("1", "true", "yes", "on"):
1227
+ return []
1228
+
1229
+ manager = get_mcp_manager()
1230
+ return manager.get_servers_for_agent()
1231
+
1232
+ def reload_mcp_servers(self):
1233
+ """Reload MCP servers and return updated servers.
1234
+
1235
+ Forces a re-sync from mcp_servers.json to pick up any configuration changes.
1236
+ """
1237
+ # Clear the MCP tool cache when servers are reloaded
1238
+ self._mcp_tool_definitions_cache = []
1239
+
1240
+ # Force re-sync from mcp_servers.json
1241
+ manager = get_mcp_manager()
1242
+ manager.sync_from_config()
1243
+
1244
+ return manager.get_servers_for_agent()
1245
+
1246
    def _load_model_with_fallback(
        self,
        requested_model_name: str,
        models_config: Dict[str, Any],
        message_group: str,
    ) -> Tuple[Any, str]:
        """Load the requested model, applying a friendly fallback when unavailable.

        Args:
            requested_model_name: Model the caller asked for.
            models_config: Mapping of model name -> configuration.
            message_group: Grouping key passed through to emitted messages.

        Returns:
            Tuple of (model instance, resolved model name). The resolved name
            may differ from the requested one when a fallback was used.

        Raises:
            ValueError: When neither the requested model nor any fallback
                candidate (global model first, then remaining configured
                models) can be loaded.
        """
        try:
            model = ModelFactory.get_model(requested_model_name, models_config)
            return model, requested_model_name
        except ValueError as exc:
            available_models = list(models_config.keys())
            available_str = (
                ", ".join(sorted(available_models))
                if available_models
                else "no configured models"
            )
            emit_warning(
                (
                    f"Model '{requested_model_name}' not found. "
                    f"Available models: {available_str}"
                ),
                message_group=message_group,
            )

            # Fallback order: the global model first, then every other
            # configured model in config order (deduplicated).
            fallback_candidates: List[str] = []
            global_candidate = get_global_model_name()
            if global_candidate:
                fallback_candidates.append(global_candidate)

            for candidate in available_models:
                if candidate not in fallback_candidates:
                    fallback_candidates.append(candidate)

            for candidate in fallback_candidates:
                # Skip empty names and the model that already failed above.
                if not candidate or candidate == requested_model_name:
                    continue
                try:
                    model = ModelFactory.get_model(candidate, models_config)
                    emit_info(
                        f"Using fallback model: {candidate}",
                        message_group=message_group,
                    )
                    return model, candidate
                except ValueError:
                    continue

            # Every candidate failed: surface a single actionable error.
            friendly_message = (
                "No valid model could be loaded. Update the model configuration or set "
                "a valid model with `config set`."
            )
            emit_error(
                friendly_message,
                message_group=message_group,
            )
            raise ValueError(friendly_message) from exc
1302
+
1303
+ def reload_code_generation_agent(self, message_group: Optional[str] = None):
1304
+ """Force-reload the pydantic-ai Agent based on current config and model."""
1305
+ from code_puppy.tools import (
1306
+ EXTENDED_THINKING_PROMPT_NOTE,
1307
+ has_extended_thinking_active,
1308
+ register_tools_for_agent,
1309
+ )
1310
+
1311
+ if message_group is None:
1312
+ message_group = str(uuid.uuid4())
1313
+
1314
+ model_name = self.get_model_name()
1315
+
1316
+ models_config = ModelFactory.load_config()
1317
+ model, resolved_model_name = self._load_model_with_fallback(
1318
+ model_name,
1319
+ models_config,
1320
+ message_group,
1321
+ )
1322
+
1323
+ instructions = self.get_full_system_prompt()
1324
+ puppy_rules = self.load_puppy_rules()
1325
+ if puppy_rules:
1326
+ instructions += f"\n{puppy_rules}"
1327
+
1328
+ mcp_servers = self.load_mcp_servers()
1329
+
1330
+ model_settings = make_model_settings(resolved_model_name)
1331
+
1332
+ # Handle claude-code models: swap instructions (prompt prepending happens in run_with_mcp)
1333
+ from code_puppy.model_utils import prepare_prompt_for_model
1334
+
1335
+ # When extended thinking is active, nudge the model to think between
1336
+ # tool calls (the share_your_reasoning tool is stripped in this case).
1337
+ if has_extended_thinking_active(resolved_model_name):
1338
+ instructions += EXTENDED_THINKING_PROMPT_NOTE
1339
+
1340
+ prepared = prepare_prompt_for_model(
1341
+ model_name, instructions, "", prepend_system_to_user=False
1342
+ )
1343
+ instructions = prepared.instructions
1344
+
1345
+ self.cur_model = model
1346
+ p_agent = PydanticAgent(
1347
+ model=model,
1348
+ instructions=instructions,
1349
+ output_type=str,
1350
+ retries=10,
1351
+ toolsets=mcp_servers,
1352
+ history_processors=[self.message_history_accumulator],
1353
+ model_settings=model_settings,
1354
+ )
1355
+
1356
+ agent_tools = self.get_available_tools()
1357
+ register_tools_for_agent(p_agent, agent_tools, model_name=resolved_model_name)
1358
+
1359
+ # Get existing tool names to filter out conflicts with MCP tools
1360
+ existing_tool_names = set()
1361
+ try:
1362
+ # Get tools from the agent to find existing tool names
1363
+ tools = getattr(p_agent, "_tools", None)
1364
+ if tools:
1365
+ existing_tool_names = set(tools.keys())
1366
+ except Exception:
1367
+ # If we can't get tool names, proceed without filtering
1368
+ pass
1369
+
1370
+ # Filter MCP server toolsets to remove conflicting tools
1371
+ filtered_mcp_servers = []
1372
+ if mcp_servers and existing_tool_names:
1373
+ for mcp_server in mcp_servers:
1374
+ try:
1375
+ # Get tools from this MCP server
1376
+ server_tools = getattr(mcp_server, "tools", None)
1377
+ if server_tools:
1378
+ # Filter out conflicting tools
1379
+ filtered_tools = {}
1380
+ for tool_name, tool_func in server_tools.items():
1381
+ if tool_name not in existing_tool_names:
1382
+ filtered_tools[tool_name] = tool_func
1383
+
1384
+ # Create a filtered version of the MCP server if we have tools
1385
+ if filtered_tools:
1386
+ # Create a new toolset with filtered tools
1387
+ from pydantic_ai.tools import ToolSet
1388
+
1389
+ filtered_toolset = ToolSet()
1390
+ for tool_name, tool_func in filtered_tools.items():
1391
+ filtered_toolset._tools[tool_name] = tool_func
1392
+ filtered_mcp_servers.append(filtered_toolset)
1393
+ else:
1394
+ # No tools left after filtering, skip this server
1395
+ pass
1396
+ else:
1397
+ # Can't get tools from this server, include as-is
1398
+ filtered_mcp_servers.append(mcp_server)
1399
+ except Exception:
1400
+ # Error processing this server, include as-is to be safe
1401
+ filtered_mcp_servers.append(mcp_server)
1402
+ else:
1403
+ # No filtering needed or possible
1404
+ filtered_mcp_servers = mcp_servers if mcp_servers else []
1405
+
1406
+ if len(filtered_mcp_servers) != len(mcp_servers):
1407
+ emit_info(
1408
+ Text.from_markup(
1409
+ f"[dim]Filtered {len(mcp_servers) - len(filtered_mcp_servers)} conflicting MCP tools[/dim]"
1410
+ )
1411
+ )
1412
+
1413
+ self._last_model_name = resolved_model_name
1414
+ # expose for run_with_mcp
1415
+ # Wrap it with DBOS, but handle MCP servers separately to avoid serialization issues
1416
+ global _reload_count
1417
+ _reload_count += 1
1418
+ if get_use_dbos():
1419
+ # Don't pass MCP servers to the agent constructor when using DBOS
1420
+ # This prevents the "cannot pickle async_generator object" error
1421
+ # MCP servers will be handled separately in run_with_mcp
1422
+ agent_without_mcp = PydanticAgent(
1423
+ model=model,
1424
+ instructions=instructions,
1425
+ output_type=str,
1426
+ retries=10,
1427
+ toolsets=[], # Don't include MCP servers here
1428
+ history_processors=[self.message_history_accumulator],
1429
+ model_settings=model_settings,
1430
+ )
1431
+
1432
+ # Register regular tools (non-MCP) on the new agent
1433
+ agent_tools = self.get_available_tools()
1434
+ register_tools_for_agent(
1435
+ agent_without_mcp, agent_tools, model_name=resolved_model_name
1436
+ )
1437
+
1438
+ # Wrap with DBOS - pass event_stream_handler at construction time
1439
+ # so DBOSModel gets the handler for streaming output
1440
+ dbos_agent = DBOSAgent(
1441
+ agent_without_mcp,
1442
+ name=f"{self.name}-{_reload_count}",
1443
+ event_stream_handler=event_stream_handler,
1444
+ )
1445
+ self.pydantic_agent = dbos_agent
1446
+ self._code_generation_agent = dbos_agent
1447
+
1448
+ # Store filtered MCP servers separately for runtime use
1449
+ self._mcp_servers = filtered_mcp_servers
1450
+ else:
1451
+ # Normal path without DBOS - include filtered MCP servers in the agent
1452
+ # Re-create agent with filtered MCP servers
1453
+ p_agent = PydanticAgent(
1454
+ model=model,
1455
+ instructions=instructions,
1456
+ output_type=str,
1457
+ retries=10,
1458
+ toolsets=filtered_mcp_servers,
1459
+ history_processors=[self.message_history_accumulator],
1460
+ model_settings=model_settings,
1461
+ )
1462
+ # Register regular tools on the agent
1463
+ agent_tools = self.get_available_tools()
1464
+ register_tools_for_agent(
1465
+ p_agent, agent_tools, model_name=resolved_model_name
1466
+ )
1467
+
1468
+ self.pydantic_agent = p_agent
1469
+ self._code_generation_agent = p_agent
1470
+ self._mcp_servers = filtered_mcp_servers
1471
+ self._mcp_servers = mcp_servers
1472
+ return self._code_generation_agent
1473
+
1474
+ def _create_agent_with_output_type(self, output_type: Type[Any]) -> PydanticAgent:
1475
+ """Create a temporary agent configured with a custom output_type.
1476
+
1477
+ This is used when structured output is requested via run_with_mcp.
1478
+ The agent is created fresh with the same configuration as the main agent
1479
+ but with the specified output_type instead of str.
1480
+
1481
+ Args:
1482
+ output_type: The Pydantic model or type for structured output.
1483
+
1484
+ Returns:
1485
+ A configured PydanticAgent (or DBOSAgent wrapper) with the custom output_type.
1486
+ """
1487
+ from code_puppy.model_utils import prepare_prompt_for_model
1488
+ from code_puppy.tools import (
1489
+ EXTENDED_THINKING_PROMPT_NOTE,
1490
+ has_extended_thinking_active,
1491
+ register_tools_for_agent,
1492
+ )
1493
+
1494
+ model_name = self.get_model_name()
1495
+ models_config = ModelFactory.load_config()
1496
+ model, resolved_model_name = self._load_model_with_fallback(
1497
+ model_name, models_config, str(uuid.uuid4())
1498
+ )
1499
+
1500
+ instructions = self.get_full_system_prompt()
1501
+ puppy_rules = self.load_puppy_rules()
1502
+ if puppy_rules:
1503
+ instructions += f"\n{puppy_rules}"
1504
+
1505
+ mcp_servers = getattr(self, "_mcp_servers", []) or []
1506
+ model_settings = make_model_settings(resolved_model_name)
1507
+
1508
+ prepared = prepare_prompt_for_model(
1509
+ model_name, instructions, "", prepend_system_to_user=False
1510
+ )
1511
+ instructions = prepared.instructions
1512
+
1513
+ # When extended thinking is active, nudge the model to think between
1514
+ # tool calls (the share_your_reasoning tool is stripped in this case).
1515
+ if has_extended_thinking_active(resolved_model_name):
1516
+ instructions += EXTENDED_THINKING_PROMPT_NOTE
1517
+
1518
+ global _reload_count
1519
+ _reload_count += 1
1520
+
1521
+ if get_use_dbos():
1522
+ temp_agent = PydanticAgent(
1523
+ model=model,
1524
+ instructions=instructions,
1525
+ output_type=output_type,
1526
+ retries=10,
1527
+ toolsets=[],
1528
+ history_processors=[self.message_history_accumulator],
1529
+ model_settings=model_settings,
1530
+ )
1531
+ agent_tools = self.get_available_tools()
1532
+ register_tools_for_agent(
1533
+ temp_agent, agent_tools, model_name=resolved_model_name
1534
+ )
1535
+ # Pass event_stream_handler at construction time for streaming output
1536
+ dbos_agent = DBOSAgent(
1537
+ temp_agent,
1538
+ name=f"{self.name}-structured-{_reload_count}",
1539
+ event_stream_handler=event_stream_handler,
1540
+ )
1541
+ return dbos_agent
1542
+ else:
1543
+ temp_agent = PydanticAgent(
1544
+ model=model,
1545
+ instructions=instructions,
1546
+ output_type=output_type,
1547
+ retries=10,
1548
+ toolsets=mcp_servers,
1549
+ history_processors=[self.message_history_accumulator],
1550
+ model_settings=model_settings,
1551
+ )
1552
+ agent_tools = self.get_available_tools()
1553
+ register_tools_for_agent(
1554
+ temp_agent, agent_tools, model_name=resolved_model_name
1555
+ )
1556
+ return temp_agent
1557
+
1558
    # It's okay to decorate it with DBOS.step even if not using DBOS; the decorator is a no-op in that case.
    @DBOS.step()
    def message_history_accumulator(self, ctx: RunContext, messages: List[Any]) -> List[Any]:
        """Merge newly streamed messages into the stored history and sanitize it.

        Dedupes incoming messages against the stored history by hash, trims the
        history via ``message_history_processor``, strips empty ThinkingParts,
        and guarantees the history ends with a ModelRequest (the Anthropic API
        rejects histories ending in a ModelResponse with a prefill error).

        Args:
            ctx: The pydantic-ai run context (forwarded to the trimmer).
            messages: Messages produced by the current model run.

        Returns:
            The sanitized message history to feed back to the model.
        """
        _message_history = self.get_message_history()

        # Hook: on_message_history_processor_start - dump the message history before processing
        on_message_history_processor_start(
            agent_name=self.name,
            session_id=getattr(self, "session_id", None),
            message_history=list(_message_history),  # Copy to avoid mutation issues
            incoming_messages=list(messages),
        )
        # Hash set enables O(1) dedup checks against the stored history.
        message_history_hashes = set([self.hash_message(m) for m in _message_history])
        messages_added = 0
        last_msg_index = len(messages) - 1
        for i, msg in enumerate(messages):
            msg_hash = self.hash_message(msg)
            if msg_hash not in message_history_hashes:
                # Always preserve the last message (the user's new prompt) even
                # if its hash matches a previously compacted/summarized message.
                # Short or repeated prompts (e.g. "yes", "1") can collide with
                # compacted hashes, which would silently drop the user's input
                # and leave the history ending with a ModelResponse. That
                # triggers an Anthropic API error: "This model does not support
                # assistant message prefill."
                if (
                    i == last_msg_index
                    or msg_hash not in self.get_compacted_message_hashes()
                ):
                    _message_history.append(msg)
                    messages_added += 1

        # Apply message history trimming using the main processor.
        # This ensures we maintain global state while still managing context limits.
        self.message_history_processor(ctx, _message_history)
        result_messages_filtered_empty_thinking = []
        filtered_count = 0
        for msg in self.get_message_history():
            # Filter out single-part messages that are empty ThinkingParts
            if len(msg.parts) == 1 and isinstance(msg.parts[0], ThinkingPart):
                if not msg.parts[0].content:
                    filtered_count += 1
                    continue
            # For multi-part messages, strip empty ThinkingParts but keep the message
            elif any(isinstance(p, ThinkingPart) and not p.content for p in msg.parts):
                msg = dataclasses.replace(
                    msg,
                    parts=[
                        p
                        for p in msg.parts
                        if not (isinstance(p, ThinkingPart) and not p.content)
                    ],
                )
                # Dropping every part leaves an empty message; discard it too.
                if not msg.parts:
                    filtered_count += 1
                    continue
            result_messages_filtered_empty_thinking.append(msg)
        self.set_message_history(result_messages_filtered_empty_thinking)

        # Safety net: ensure history always ends with a ModelRequest.
        # If compaction or filtering somehow leaves a trailing ModelResponse,
        # the Anthropic API will reject it with a prefill error.
        final_history = self.ensure_history_ends_with_request(
            self.get_message_history()
        )
        if final_history != self.get_message_history():
            self.set_message_history(final_history)

        # Hook: on_message_history_processor_end - dump the message history after processing
        messages_filtered = len(messages) - messages_added + filtered_count
        on_message_history_processor_end(
            agent_name=self.name,
            session_id=getattr(self, "session_id", None),
            message_history=list(final_history),  # Copy to avoid mutation issues
            messages_added=messages_added,
            messages_filtered=messages_filtered,
        )

        return final_history
1637
+
1638
+ def _spawn_ctrl_x_key_listener(
1639
+ self,
1640
+ stop_event: threading.Event,
1641
+ on_escape: Callable[[], None],
1642
+ on_cancel_agent: Optional[Callable[[], None]] = None,
1643
+ ) -> Optional[threading.Thread]:
1644
+ """Start a keyboard listener thread for CLI sessions.
1645
+
1646
+ Listens for Ctrl+X (shell command cancel) and optionally the configured
1647
+ cancel_agent_key (when not using SIGINT/Ctrl+C).
1648
+
1649
+ Args:
1650
+ stop_event: Event to signal the listener to stop.
1651
+ on_escape: Callback for Ctrl+X (shell command cancel).
1652
+ on_cancel_agent: Optional callback for cancel_agent_key (only used
1653
+ when cancel_agent_uses_signal() returns False).
1654
+ """
1655
+ try:
1656
+ import sys
1657
+ except ImportError:
1658
+ return None
1659
+
1660
+ stdin = getattr(sys, "stdin", None)
1661
+ if stdin is None or not hasattr(stdin, "isatty"):
1662
+ return None
1663
+ try:
1664
+ if not stdin.isatty():
1665
+ return None
1666
+ except Exception:
1667
+ return None
1668
+
1669
+ def listener() -> None:
1670
+ try:
1671
+ if sys.platform.startswith("win"):
1672
+ self._listen_for_ctrl_x_windows(
1673
+ stop_event, on_escape, on_cancel_agent
1674
+ )
1675
+ else:
1676
+ self._listen_for_ctrl_x_posix(
1677
+ stop_event, on_escape, on_cancel_agent
1678
+ )
1679
+ except Exception:
1680
+ emit_warning(
1681
+ "Key listener stopped unexpectedly; press Ctrl+C to cancel."
1682
+ )
1683
+
1684
+ thread = threading.Thread(
1685
+ target=listener, name="code-puppy-key-listener", daemon=True
1686
+ )
1687
+ thread.start()
1688
+ return thread
1689
+
1690
+ def _listen_for_ctrl_x_windows(
1691
+ self,
1692
+ stop_event: threading.Event,
1693
+ on_escape: Callable[[], None],
1694
+ on_cancel_agent: Optional[Callable[[], None]] = None,
1695
+ ) -> None:
1696
+ import msvcrt
1697
+ import time
1698
+
1699
+ # Get the cancel agent char code if we're using keyboard-based cancel
1700
+ cancel_agent_char: Optional[str] = None
1701
+ if on_cancel_agent is not None and not cancel_agent_uses_signal():
1702
+ cancel_agent_char = get_cancel_agent_char_code()
1703
+
1704
+ while not stop_event.is_set():
1705
+ try:
1706
+ if msvcrt.kbhit():
1707
+ key = msvcrt.getwch()
1708
+ if key == "\x18": # Ctrl+X
1709
+ try:
1710
+ on_escape()
1711
+ except Exception:
1712
+ emit_warning(
1713
+ "Ctrl+X handler raised unexpectedly; Ctrl+C still works."
1714
+ )
1715
+ elif (
1716
+ cancel_agent_char
1717
+ and on_cancel_agent
1718
+ and key == cancel_agent_char
1719
+ ):
1720
+ try:
1721
+ on_cancel_agent()
1722
+ except Exception:
1723
+ emit_warning("Cancel agent handler raised unexpectedly.")
1724
+ except Exception:
1725
+ emit_warning(
1726
+ "Windows key listener error; Ctrl+C is still available for cancel."
1727
+ )
1728
+ return
1729
+ time.sleep(0.05)
1730
+
1731
+ def _listen_for_ctrl_x_posix(
1732
+ self,
1733
+ stop_event: threading.Event,
1734
+ on_escape: Callable[[], None],
1735
+ on_cancel_agent: Optional[Callable[[], None]] = None,
1736
+ ) -> None:
1737
+ import select
1738
+ import sys
1739
+ import termios
1740
+ import tty
1741
+
1742
+ # Get the cancel agent char code if we're using keyboard-based cancel
1743
+ cancel_agent_char: Optional[str] = None
1744
+ if on_cancel_agent is not None and not cancel_agent_uses_signal():
1745
+ cancel_agent_char = get_cancel_agent_char_code()
1746
+
1747
+ stdin = sys.stdin
1748
+ try:
1749
+ fd = stdin.fileno()
1750
+ except (AttributeError, ValueError, OSError):
1751
+ return
1752
+ try:
1753
+ original_attrs = termios.tcgetattr(fd)
1754
+ except Exception:
1755
+ return
1756
+
1757
+ try:
1758
+ tty.setcbreak(fd)
1759
+ while not stop_event.is_set():
1760
+ try:
1761
+ read_ready, _, _ = select.select([stdin], [], [], 0.05)
1762
+ except Exception:
1763
+ break
1764
+ if not read_ready:
1765
+ continue
1766
+ data = stdin.read(1)
1767
+ if not data:
1768
+ break
1769
+ if data == "\x18": # Ctrl+X
1770
+ try:
1771
+ on_escape()
1772
+ except Exception:
1773
+ emit_warning(
1774
+ "Ctrl+X handler raised unexpectedly; Ctrl+C still works."
1775
+ )
1776
+ elif (
1777
+ cancel_agent_char and on_cancel_agent and data == cancel_agent_char
1778
+ ):
1779
+ try:
1780
+ on_cancel_agent()
1781
+ except Exception:
1782
+ emit_warning("Cancel agent handler raised unexpectedly.")
1783
+ finally:
1784
+ termios.tcsetattr(fd, termios.TCSADRAIN, original_attrs)
1785
+
1786
+ async def run_with_mcp(
1787
+ self,
1788
+ prompt: str,
1789
+ *,
1790
+ attachments: Optional[Sequence[BinaryContent]] = None,
1791
+ link_attachments: Optional[Sequence[Union[ImageUrl, DocumentUrl]]] = None,
1792
+ output_type: Optional[Type[Any]] = None,
1793
+ **kwargs,
1794
+ ) -> Any:
1795
+ """Run the agent with MCP servers, attachments, and full cancellation support.
1796
+
1797
+ Args:
1798
+ prompt: Primary user prompt text (may be empty when attachments present).
1799
+ attachments: Local binary payloads (e.g., dragged images) to include.
1800
+ link_attachments: Remote assets (image/document URLs) to include.
1801
+ output_type: Optional Pydantic model or type for structured output.
1802
+ When provided, creates a temporary agent configured to return
1803
+ this type instead of the default string output.
1804
+ **kwargs: Additional arguments forwarded to `pydantic_ai.Agent.run`.
1805
+
1806
+ Returns:
1807
+ The agent's response (typed according to output_type if specified).
1808
+
1809
+ Raises:
1810
+ asyncio.CancelledError: When execution is cancelled by user.
1811
+ """
1812
+ # Sanitize prompt to remove invalid Unicode surrogates that can cause
1813
+ # encoding errors (especially common on Windows with copy-paste)
1814
+ if prompt:
1815
+ try:
1816
+ prompt = prompt.encode("utf-8", errors="surrogatepass").decode(
1817
+ "utf-8", errors="replace"
1818
+ )
1819
+ except (UnicodeEncodeError, UnicodeDecodeError):
1820
+ # Fallback: filter out surrogate characters directly
1821
+ prompt = "".join(
1822
+ char if ord(char) < 0xD800 or ord(char) > 0xDFFF else "\ufffd"
1823
+ for char in prompt
1824
+ )
1825
+
1826
+ group_id = str(uuid.uuid4())
1827
+ # Avoid double-loading: reuse existing agent if already built
1828
+ pydantic_agent = (
1829
+ self._code_generation_agent or self.reload_code_generation_agent()
1830
+ )
1831
+
1832
+ # If a custom output_type is specified, create a temporary agent with that type
1833
+ if output_type is not None:
1834
+ pydantic_agent = self._create_agent_with_output_type(output_type)
1835
+
1836
+ # Handle model-specific prompt transformations via prepare_prompt_for_model()
1837
+ # This uses the get_model_system_prompt hook, so plugins can register their own handlers
1838
+ from code_puppy.model_utils import prepare_prompt_for_model
1839
+
1840
+ # Only prepend system prompt on first message (empty history)
1841
+ should_prepend = len(self.get_message_history()) == 0
1842
+ if should_prepend:
1843
+ system_prompt = self.get_full_system_prompt()
1844
+ puppy_rules = self.load_puppy_rules()
1845
+ if puppy_rules:
1846
+ system_prompt += f"\n{puppy_rules}"
1847
+
1848
+ prepared = prepare_prompt_for_model(
1849
+ model_name=self.get_model_name(),
1850
+ system_prompt=system_prompt,
1851
+ user_prompt=prompt,
1852
+ prepend_system_to_user=True,
1853
+ )
1854
+ prompt = prepared.user_prompt
1855
+
1856
+ # Build combined prompt payload when attachments are provided.
1857
+ attachment_parts: List[Any] = []
1858
+ if attachments:
1859
+ attachment_parts.extend(list(attachments))
1860
+ if link_attachments:
1861
+ attachment_parts.extend(list(link_attachments))
1862
+
1863
+ if attachment_parts:
1864
+ prompt_payload: Union[str, List[Any]] = []
1865
+ if prompt:
1866
+ prompt_payload.append(prompt)
1867
+ prompt_payload.extend(attachment_parts)
1868
+ else:
1869
+ prompt_payload = prompt
1870
+
1871
+ async def run_agent_task():
1872
+ try:
1873
+ self.set_message_history(
1874
+ self.prune_interrupted_tool_calls(self.get_message_history())
1875
+ )
1876
+
1877
+ # DELAYED COMPACTION: Check if we should attempt delayed compaction
1878
+ if self.should_attempt_delayed_compaction():
1879
+ emit_info(
1880
+ "🔄 Attempting delayed compaction (tool calls completed)",
1881
+ message_group="token_context_status",
1882
+ )
1883
+ current_messages = self.get_message_history()
1884
+ compacted_messages, _ = self.compact_messages(current_messages)
1885
+ if compacted_messages != current_messages:
1886
+ self.set_message_history(compacted_messages)
1887
+ emit_info(
1888
+ "✅ Delayed compaction completed successfully",
1889
+ message_group="token_context_status",
1890
+ )
1891
+
1892
+ usage_limits = UsageLimits(request_limit=get_message_limit())
1893
+
1894
+ # Handle MCP servers - add them temporarily when using DBOS
1895
+ if (
1896
+ get_use_dbos()
1897
+ and hasattr(self, "_mcp_servers")
1898
+ and self._mcp_servers
1899
+ ):
1900
+ # Temporarily add MCP servers to the DBOS agent using internal _toolsets
1901
+ original_toolsets = pydantic_agent._toolsets
1902
+ pydantic_agent._toolsets = original_toolsets + self._mcp_servers
1903
+ pydantic_agent._toolsets = original_toolsets + self._mcp_servers
1904
+
1905
+ try:
1906
+ # Set the workflow ID for DBOS context so DBOS and the agent ID match
1907
+ with SetWorkflowID(group_id):
1908
+ result_ = await pydantic_agent.run(
1909
+ prompt_payload,
1910
+ message_history=self.get_message_history(),
1911
+ usage_limits=usage_limits,
1912
+ event_stream_handler=event_stream_handler,
1913
+ **kwargs,
1914
+ )
1915
+ return result_
1916
+ finally:
1917
+ # Always restore original toolsets
1918
+ pydantic_agent._toolsets = original_toolsets
1919
+ elif get_use_dbos():
1920
+ with SetWorkflowID(group_id):
1921
+ result_ = await pydantic_agent.run(
1922
+ prompt_payload,
1923
+ message_history=self.get_message_history(),
1924
+ usage_limits=usage_limits,
1925
+ event_stream_handler=event_stream_handler,
1926
+ **kwargs,
1927
+ )
1928
+ return result_
1929
+ else:
1930
+ # Non-DBOS path (MCP servers are already included)
1931
+ result_ = await pydantic_agent.run(
1932
+ prompt_payload,
1933
+ message_history=self.get_message_history(),
1934
+ usage_limits=usage_limits,
1935
+ event_stream_handler=event_stream_handler,
1936
+ **kwargs,
1937
+ )
1938
+ return result_
1939
+ except* UsageLimitExceeded as ule:
1940
+ emit_info(f"Usage limit exceeded: {str(ule)}", group_id=group_id)
1941
+ emit_info(
1942
+ "The agent has reached its usage limit. You can ask it to continue by saying 'please continue' or similar.",
1943
+ group_id=group_id,
1944
+ )
1945
+ except* mcp.shared.exceptions.McpError as mcp_error:
1946
+ emit_info(f"MCP server error: {str(mcp_error)}", group_id=group_id)
1947
+ emit_info(f"{str(mcp_error)}", group_id=group_id)
1948
+ emit_info(
1949
+ "Try disabling any malfunctioning MCP servers", group_id=group_id
1950
+ )
1951
+ except* asyncio.exceptions.CancelledError:
1952
+ emit_info("Cancelled")
1953
+ if get_use_dbos():
1954
+ await DBOS.cancel_workflow_async(group_id)
1955
+ except* InterruptedError as ie:
1956
+ emit_info(f"Interrupted: {str(ie)}")
1957
+ if get_use_dbos():
1958
+ await DBOS.cancel_workflow_async(group_id)
1959
+ except* Exception as other_error:
1960
+ # Filter out CancelledError and UsageLimitExceeded from the exception group - let it propagate
1961
+ remaining_exceptions = []
1962
+
1963
+ def collect_non_cancelled_exceptions(exc):
1964
+ if isinstance(exc, ExceptionGroup):
1965
+ for sub_exc in exc.exceptions:
1966
+ collect_non_cancelled_exceptions(sub_exc)
1967
+ elif not isinstance(
1968
+ exc, (asyncio.CancelledError, UsageLimitExceeded)
1969
+ ):
1970
+ remaining_exceptions.append(exc)
1971
+ emit_info(f"Unexpected error: {str(exc)}", group_id=group_id)
1972
+ emit_info(f"{str(exc.args)}", group_id=group_id)
1973
+ # Log to file for debugging
1974
+ log_error(
1975
+ exc,
1976
+ context=f"Agent run (group_id={group_id})",
1977
+ include_traceback=True,
1978
+ )
1979
+
1980
+ collect_non_cancelled_exceptions(other_error)
1981
+
1982
+ # If there are CancelledError exceptions in the group, re-raise them
1983
+ cancelled_exceptions = []
1984
+
1985
+ def collect_cancelled_exceptions(exc):
1986
+ if isinstance(exc, ExceptionGroup):
1987
+ for sub_exc in exc.exceptions:
1988
+ collect_cancelled_exceptions(sub_exc)
1989
+ elif isinstance(exc, asyncio.CancelledError):
1990
+ cancelled_exceptions.append(exc)
1991
+
1992
+ collect_cancelled_exceptions(other_error)
1993
+ finally:
1994
+ self.set_message_history(
1995
+ self.prune_interrupted_tool_calls(self.get_message_history())
1996
+ )
1997
+
1998
+ # Create the task FIRST
1999
+ agent_task = asyncio.create_task(run_agent_task())
2000
+
2001
+ # Fire agent_run_start hook - plugins can use this to start background tasks
2002
+ # (e.g., token refresh heartbeats for OAuth models)
2003
+ try:
2004
+ await on_agent_run_start(
2005
+ agent_name=self.name,
2006
+ model_name=self.get_model_name(),
2007
+ session_id=group_id,
2008
+ )
2009
+ except Exception:
2010
+ pass # Don't fail agent run if hook fails
2011
+
2012
+ loop = asyncio.get_running_loop()
2013
+
2014
+ def schedule_agent_cancel() -> None:
2015
+ from code_puppy.tools.command_runner import _RUNNING_PROCESSES
2016
+
2017
+ if len(_RUNNING_PROCESSES):
2018
+ emit_warning(
2019
+ "Refusing to cancel Agent while a shell command is currently running - press Ctrl+X to cancel the shell command."
2020
+ )
2021
+ return
2022
+ if agent_task.done():
2023
+ return
2024
+
2025
+ # Cancel all active subagent tasks
2026
+ if _active_subagent_tasks:
2027
+ emit_warning(
2028
+ f"Cancelling {len(_active_subagent_tasks)} active subagent task(s)..."
2029
+ )
2030
+ for task in list(
2031
+ _active_subagent_tasks
2032
+ ): # Create a copy since we'll be modifying the set
2033
+ if not task.done():
2034
+ loop.call_soon_threadsafe(task.cancel)
2035
+ loop.call_soon_threadsafe(agent_task.cancel)
2036
+
2037
+ def keyboard_interrupt_handler(_sig, _frame):
2038
+ # If we're awaiting user input (e.g., file permission prompt),
2039
+ # don't cancel the agent - let the input() call handle the interrupt naturally
2040
+ if is_awaiting_user_input():
2041
+ # Don't do anything here - let the input() call raise KeyboardInterrupt naturally
2042
+ return
2043
+
2044
+ schedule_agent_cancel()
2045
+
2046
+ def graceful_sigint_handler(_sig, _frame):
2047
+ # When using keyboard-based cancel, SIGINT should be a no-op
2048
+ # (just show a hint to user about the configured cancel key)
2049
+ # Also reset terminal to prevent bricking on Windows+uvx
2050
+ from code_puppy.keymap import get_cancel_agent_display_name
2051
+ from code_puppy.terminal_utils import reset_windows_terminal_full
2052
+
2053
+ # Reset terminal state first to prevent bricking
2054
+ reset_windows_terminal_full()
2055
+
2056
+ cancel_key = get_cancel_agent_display_name()
2057
+ emit_info(f"Use {cancel_key} to cancel the agent task.")
2058
+
2059
+ original_handler = None
2060
+ key_listener_stop_event = None
2061
+ _key_listener_thread = None
2062
+
2063
+ try:
2064
+ if cancel_agent_uses_signal():
2065
+ # Use SIGINT-based cancellation (default Ctrl+C behavior)
2066
+ original_handler = signal.signal(
2067
+ signal.SIGINT, keyboard_interrupt_handler
2068
+ )
2069
+ else:
2070
+ # Use keyboard listener for agent cancellation
2071
+ # Set a graceful SIGINT handler that shows a hint
2072
+ original_handler = signal.signal(signal.SIGINT, graceful_sigint_handler)
2073
+ # Spawn keyboard listener with the cancel agent callback
2074
+ key_listener_stop_event = threading.Event()
2075
+ _key_listener_thread = self._spawn_ctrl_x_key_listener(
2076
+ key_listener_stop_event,
2077
+ on_escape=lambda: None, # Ctrl+X handled by command_runner
2078
+ on_cancel_agent=schedule_agent_cancel,
2079
+ )
2080
+
2081
+ # Wait for the task to complete or be cancelled
2082
+ result = await agent_task
2083
+
2084
+ # Update MCP tool cache after successful run for accurate token estimation
2085
+ if hasattr(self, "_mcp_servers") and self._mcp_servers:
2086
+ try:
2087
+ await self._update_mcp_tool_cache()
2088
+ except Exception:
2089
+ pass # Don't fail the run if cache update fails
2090
+
2091
+ # Extract response text for the callback
2092
+ _run_response_text = ""
2093
+ if result is not None:
2094
+ if hasattr(result, "data"):
2095
+ _run_response_text = str(result.data) if result.data else ""
2096
+ elif hasattr(result, "output"):
2097
+ _run_response_text = str(result.output) if result.output else ""
2098
+ else:
2099
+ _run_response_text = str(result)
2100
+
2101
+ _run_success = True
2102
+ _run_error = None
2103
+ return result
2104
+ except asyncio.CancelledError:
2105
+ _run_success = False
2106
+ _run_error = None # Cancellation is not an error
2107
+ _run_response_text = ""
2108
+ agent_task.cancel()
2109
+ except KeyboardInterrupt:
2110
+ _run_success = False
2111
+ _run_error = None # User interrupt is not an error
2112
+ _run_response_text = ""
2113
+ if not agent_task.done():
2114
+ agent_task.cancel()
2115
+ except Exception as e:
2116
+ _run_success = False
2117
+ _run_error = e
2118
+ _run_response_text = ""
2119
+ raise
2120
+ finally:
2121
+ # Fire agent_run_end hook - plugins can use this for:
2122
+ # - Stopping background tasks (token refresh heartbeats)
2123
+ # - Workflow orchestration (Ralph's autonomous loop)
2124
+ # - Logging/analytics
2125
+ try:
2126
+ await on_agent_run_end(
2127
+ agent_name=self.name,
2128
+ model_name=self.get_model_name(),
2129
+ session_id=group_id,
2130
+ success=_run_success,
2131
+ error=_run_error,
2132
+ response_text=_run_response_text,
2133
+ metadata={"model": self.get_model_name()},
2134
+ )
2135
+ except Exception:
2136
+ pass # Don't fail cleanup if hook fails
2137
+
2138
+ # Stop keyboard listener if it was started
2139
+ if key_listener_stop_event is not None:
2140
+ key_listener_stop_event.set()
2141
+ # Restore original signal handler
2142
+ if (
2143
+ original_handler is not None
2144
+ ): # Explicit None check - SIG_DFL can be 0/falsy!
2145
+ signal.signal(signal.SIGINT, original_handler)