codepp 0.0.437__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/__init__.py +10 -0
- code_puppy/__main__.py +10 -0
- code_puppy/agents/__init__.py +31 -0
- code_puppy/agents/agent_c_reviewer.py +155 -0
- code_puppy/agents/agent_code_puppy.py +117 -0
- code_puppy/agents/agent_code_reviewer.py +90 -0
- code_puppy/agents/agent_cpp_reviewer.py +132 -0
- code_puppy/agents/agent_creator_agent.py +638 -0
- code_puppy/agents/agent_golang_reviewer.py +151 -0
- code_puppy/agents/agent_helios.py +124 -0
- code_puppy/agents/agent_javascript_reviewer.py +160 -0
- code_puppy/agents/agent_manager.py +742 -0
- code_puppy/agents/agent_pack_leader.py +385 -0
- code_puppy/agents/agent_planning.py +165 -0
- code_puppy/agents/agent_python_programmer.py +169 -0
- code_puppy/agents/agent_python_reviewer.py +90 -0
- code_puppy/agents/agent_qa_expert.py +163 -0
- code_puppy/agents/agent_qa_kitten.py +208 -0
- code_puppy/agents/agent_scheduler.py +121 -0
- code_puppy/agents/agent_security_auditor.py +181 -0
- code_puppy/agents/agent_terminal_qa.py +323 -0
- code_puppy/agents/agent_typescript_reviewer.py +166 -0
- code_puppy/agents/base_agent.py +2156 -0
- code_puppy/agents/event_stream_handler.py +348 -0
- code_puppy/agents/json_agent.py +202 -0
- code_puppy/agents/pack/__init__.py +34 -0
- code_puppy/agents/pack/bloodhound.py +304 -0
- code_puppy/agents/pack/husky.py +327 -0
- code_puppy/agents/pack/retriever.py +393 -0
- code_puppy/agents/pack/shepherd.py +348 -0
- code_puppy/agents/pack/terrier.py +287 -0
- code_puppy/agents/pack/watchdog.py +367 -0
- code_puppy/agents/prompt_reviewer.py +145 -0
- code_puppy/agents/subagent_stream_handler.py +276 -0
- code_puppy/api/__init__.py +13 -0
- code_puppy/api/app.py +169 -0
- code_puppy/api/main.py +21 -0
- code_puppy/api/pty_manager.py +453 -0
- code_puppy/api/routers/__init__.py +12 -0
- code_puppy/api/routers/agents.py +36 -0
- code_puppy/api/routers/commands.py +217 -0
- code_puppy/api/routers/config.py +75 -0
- code_puppy/api/routers/sessions.py +234 -0
- code_puppy/api/templates/terminal.html +361 -0
- code_puppy/api/websocket.py +154 -0
- code_puppy/callbacks.py +692 -0
- code_puppy/chatgpt_codex_client.py +338 -0
- code_puppy/claude_cache_client.py +672 -0
- code_puppy/cli_runner.py +1073 -0
- code_puppy/command_line/__init__.py +1 -0
- code_puppy/command_line/add_model_menu.py +1092 -0
- code_puppy/command_line/agent_menu.py +662 -0
- code_puppy/command_line/attachments.py +395 -0
- code_puppy/command_line/autosave_menu.py +704 -0
- code_puppy/command_line/clipboard.py +527 -0
- code_puppy/command_line/colors_menu.py +532 -0
- code_puppy/command_line/command_handler.py +293 -0
- code_puppy/command_line/command_registry.py +150 -0
- code_puppy/command_line/config_commands.py +719 -0
- code_puppy/command_line/core_commands.py +867 -0
- code_puppy/command_line/diff_menu.py +865 -0
- code_puppy/command_line/file_path_completion.py +73 -0
- code_puppy/command_line/load_context_completion.py +52 -0
- code_puppy/command_line/mcp/__init__.py +10 -0
- code_puppy/command_line/mcp/base.py +32 -0
- code_puppy/command_line/mcp/catalog_server_installer.py +175 -0
- code_puppy/command_line/mcp/custom_server_form.py +688 -0
- code_puppy/command_line/mcp/custom_server_installer.py +195 -0
- code_puppy/command_line/mcp/edit_command.py +148 -0
- code_puppy/command_line/mcp/handler.py +138 -0
- code_puppy/command_line/mcp/help_command.py +147 -0
- code_puppy/command_line/mcp/install_command.py +214 -0
- code_puppy/command_line/mcp/install_menu.py +705 -0
- code_puppy/command_line/mcp/list_command.py +94 -0
- code_puppy/command_line/mcp/logs_command.py +235 -0
- code_puppy/command_line/mcp/remove_command.py +82 -0
- code_puppy/command_line/mcp/restart_command.py +100 -0
- code_puppy/command_line/mcp/search_command.py +123 -0
- code_puppy/command_line/mcp/start_all_command.py +135 -0
- code_puppy/command_line/mcp/start_command.py +117 -0
- code_puppy/command_line/mcp/status_command.py +184 -0
- code_puppy/command_line/mcp/stop_all_command.py +112 -0
- code_puppy/command_line/mcp/stop_command.py +80 -0
- code_puppy/command_line/mcp/test_command.py +107 -0
- code_puppy/command_line/mcp/utils.py +129 -0
- code_puppy/command_line/mcp/wizard_utils.py +334 -0
- code_puppy/command_line/mcp_completion.py +174 -0
- code_puppy/command_line/model_picker_completion.py +197 -0
- code_puppy/command_line/model_settings_menu.py +932 -0
- code_puppy/command_line/motd.py +96 -0
- code_puppy/command_line/onboarding_slides.py +179 -0
- code_puppy/command_line/onboarding_wizard.py +342 -0
- code_puppy/command_line/pin_command_completion.py +329 -0
- code_puppy/command_line/prompt_toolkit_completion.py +846 -0
- code_puppy/command_line/session_commands.py +302 -0
- code_puppy/command_line/shell_passthrough.py +145 -0
- code_puppy/command_line/skills_completion.py +160 -0
- code_puppy/command_line/uc_menu.py +893 -0
- code_puppy/command_line/utils.py +93 -0
- code_puppy/command_line/wiggum_state.py +78 -0
- code_puppy/config.py +1770 -0
- code_puppy/error_logging.py +134 -0
- code_puppy/gemini_code_assist.py +385 -0
- code_puppy/gemini_model.py +754 -0
- code_puppy/hook_engine/README.md +105 -0
- code_puppy/hook_engine/__init__.py +21 -0
- code_puppy/hook_engine/aliases.py +155 -0
- code_puppy/hook_engine/engine.py +221 -0
- code_puppy/hook_engine/executor.py +296 -0
- code_puppy/hook_engine/matcher.py +156 -0
- code_puppy/hook_engine/models.py +240 -0
- code_puppy/hook_engine/registry.py +106 -0
- code_puppy/hook_engine/validator.py +144 -0
- code_puppy/http_utils.py +361 -0
- code_puppy/keymap.py +128 -0
- code_puppy/main.py +10 -0
- code_puppy/mcp_/__init__.py +66 -0
- code_puppy/mcp_/async_lifecycle.py +286 -0
- code_puppy/mcp_/blocking_startup.py +469 -0
- code_puppy/mcp_/captured_stdio_server.py +275 -0
- code_puppy/mcp_/circuit_breaker.py +290 -0
- code_puppy/mcp_/config_wizard.py +507 -0
- code_puppy/mcp_/dashboard.py +308 -0
- code_puppy/mcp_/error_isolation.py +407 -0
- code_puppy/mcp_/examples/retry_example.py +226 -0
- code_puppy/mcp_/health_monitor.py +589 -0
- code_puppy/mcp_/managed_server.py +428 -0
- code_puppy/mcp_/manager.py +807 -0
- code_puppy/mcp_/mcp_logs.py +224 -0
- code_puppy/mcp_/registry.py +451 -0
- code_puppy/mcp_/retry_manager.py +337 -0
- code_puppy/mcp_/server_registry_catalog.py +1126 -0
- code_puppy/mcp_/status_tracker.py +355 -0
- code_puppy/mcp_/system_tools.py +209 -0
- code_puppy/mcp_prompts/__init__.py +1 -0
- code_puppy/mcp_prompts/hook_creator.py +103 -0
- code_puppy/messaging/__init__.py +255 -0
- code_puppy/messaging/bus.py +613 -0
- code_puppy/messaging/commands.py +167 -0
- code_puppy/messaging/markdown_patches.py +57 -0
- code_puppy/messaging/message_queue.py +361 -0
- code_puppy/messaging/messages.py +569 -0
- code_puppy/messaging/queue_console.py +271 -0
- code_puppy/messaging/renderers.py +311 -0
- code_puppy/messaging/rich_renderer.py +1158 -0
- code_puppy/messaging/spinner/__init__.py +83 -0
- code_puppy/messaging/spinner/console_spinner.py +240 -0
- code_puppy/messaging/spinner/spinner_base.py +95 -0
- code_puppy/messaging/subagent_console.py +460 -0
- code_puppy/model_factory.py +848 -0
- code_puppy/model_switching.py +63 -0
- code_puppy/model_utils.py +168 -0
- code_puppy/models.json +174 -0
- code_puppy/models_dev_api.json +1 -0
- code_puppy/models_dev_parser.py +592 -0
- code_puppy/plugins/__init__.py +186 -0
- code_puppy/plugins/agent_skills/__init__.py +22 -0
- code_puppy/plugins/agent_skills/config.py +175 -0
- code_puppy/plugins/agent_skills/discovery.py +136 -0
- code_puppy/plugins/agent_skills/downloader.py +392 -0
- code_puppy/plugins/agent_skills/installer.py +22 -0
- code_puppy/plugins/agent_skills/metadata.py +219 -0
- code_puppy/plugins/agent_skills/prompt_builder.py +60 -0
- code_puppy/plugins/agent_skills/register_callbacks.py +241 -0
- code_puppy/plugins/agent_skills/remote_catalog.py +322 -0
- code_puppy/plugins/agent_skills/skill_catalog.py +257 -0
- code_puppy/plugins/agent_skills/skills_install_menu.py +664 -0
- code_puppy/plugins/agent_skills/skills_menu.py +781 -0
- code_puppy/plugins/antigravity_oauth/__init__.py +10 -0
- code_puppy/plugins/antigravity_oauth/accounts.py +406 -0
- code_puppy/plugins/antigravity_oauth/antigravity_model.py +706 -0
- code_puppy/plugins/antigravity_oauth/config.py +42 -0
- code_puppy/plugins/antigravity_oauth/constants.py +133 -0
- code_puppy/plugins/antigravity_oauth/oauth.py +478 -0
- code_puppy/plugins/antigravity_oauth/register_callbacks.py +518 -0
- code_puppy/plugins/antigravity_oauth/storage.py +288 -0
- code_puppy/plugins/antigravity_oauth/test_plugin.py +319 -0
- code_puppy/plugins/antigravity_oauth/token.py +167 -0
- code_puppy/plugins/antigravity_oauth/transport.py +863 -0
- code_puppy/plugins/antigravity_oauth/utils.py +168 -0
- code_puppy/plugins/chatgpt_oauth/__init__.py +8 -0
- code_puppy/plugins/chatgpt_oauth/config.py +52 -0
- code_puppy/plugins/chatgpt_oauth/oauth_flow.py +329 -0
- code_puppy/plugins/chatgpt_oauth/register_callbacks.py +176 -0
- code_puppy/plugins/chatgpt_oauth/test_plugin.py +301 -0
- code_puppy/plugins/chatgpt_oauth/utils.py +523 -0
- code_puppy/plugins/claude_code_hooks/__init__.py +1 -0
- code_puppy/plugins/claude_code_hooks/config.py +137 -0
- code_puppy/plugins/claude_code_hooks/register_callbacks.py +175 -0
- code_puppy/plugins/claude_code_oauth/README.md +167 -0
- code_puppy/plugins/claude_code_oauth/SETUP.md +93 -0
- code_puppy/plugins/claude_code_oauth/__init__.py +25 -0
- code_puppy/plugins/claude_code_oauth/config.py +52 -0
- code_puppy/plugins/claude_code_oauth/register_callbacks.py +453 -0
- code_puppy/plugins/claude_code_oauth/test_plugin.py +283 -0
- code_puppy/plugins/claude_code_oauth/token_refresh_heartbeat.py +241 -0
- code_puppy/plugins/claude_code_oauth/utils.py +640 -0
- code_puppy/plugins/customizable_commands/__init__.py +0 -0
- code_puppy/plugins/customizable_commands/register_callbacks.py +152 -0
- code_puppy/plugins/example_custom_command/README.md +280 -0
- code_puppy/plugins/example_custom_command/register_callbacks.py +51 -0
- code_puppy/plugins/file_permission_handler/__init__.py +4 -0
- code_puppy/plugins/file_permission_handler/register_callbacks.py +470 -0
- code_puppy/plugins/frontend_emitter/__init__.py +25 -0
- code_puppy/plugins/frontend_emitter/emitter.py +121 -0
- code_puppy/plugins/frontend_emitter/register_callbacks.py +261 -0
- code_puppy/plugins/hook_creator/__init__.py +1 -0
- code_puppy/plugins/hook_creator/register_callbacks.py +33 -0
- code_puppy/plugins/hook_manager/__init__.py +1 -0
- code_puppy/plugins/hook_manager/config.py +290 -0
- code_puppy/plugins/hook_manager/hooks_menu.py +564 -0
- code_puppy/plugins/hook_manager/register_callbacks.py +227 -0
- code_puppy/plugins/oauth_puppy_html.py +228 -0
- code_puppy/plugins/scheduler/__init__.py +1 -0
- code_puppy/plugins/scheduler/register_callbacks.py +88 -0
- code_puppy/plugins/scheduler/scheduler_menu.py +522 -0
- code_puppy/plugins/scheduler/scheduler_wizard.py +341 -0
- code_puppy/plugins/shell_safety/__init__.py +6 -0
- code_puppy/plugins/shell_safety/agent_shell_safety.py +69 -0
- code_puppy/plugins/shell_safety/command_cache.py +156 -0
- code_puppy/plugins/shell_safety/register_callbacks.py +202 -0
- code_puppy/plugins/synthetic_status/__init__.py +1 -0
- code_puppy/plugins/synthetic_status/register_callbacks.py +132 -0
- code_puppy/plugins/synthetic_status/status_api.py +147 -0
- code_puppy/plugins/universal_constructor/__init__.py +13 -0
- code_puppy/plugins/universal_constructor/models.py +138 -0
- code_puppy/plugins/universal_constructor/register_callbacks.py +47 -0
- code_puppy/plugins/universal_constructor/registry.py +302 -0
- code_puppy/plugins/universal_constructor/sandbox.py +584 -0
- code_puppy/prompts/antigravity_system_prompt.md +1 -0
- code_puppy/pydantic_patches.py +356 -0
- code_puppy/reopenable_async_client.py +232 -0
- code_puppy/round_robin_model.py +150 -0
- code_puppy/scheduler/__init__.py +41 -0
- code_puppy/scheduler/__main__.py +9 -0
- code_puppy/scheduler/cli.py +118 -0
- code_puppy/scheduler/config.py +126 -0
- code_puppy/scheduler/daemon.py +280 -0
- code_puppy/scheduler/executor.py +155 -0
- code_puppy/scheduler/platform.py +19 -0
- code_puppy/scheduler/platform_unix.py +22 -0
- code_puppy/scheduler/platform_win.py +32 -0
- code_puppy/session_storage.py +338 -0
- code_puppy/status_display.py +257 -0
- code_puppy/summarization_agent.py +176 -0
- code_puppy/terminal_utils.py +418 -0
- code_puppy/tools/__init__.py +501 -0
- code_puppy/tools/agent_tools.py +603 -0
- code_puppy/tools/ask_user_question/__init__.py +26 -0
- code_puppy/tools/ask_user_question/constants.py +73 -0
- code_puppy/tools/ask_user_question/demo_tui.py +55 -0
- code_puppy/tools/ask_user_question/handler.py +232 -0
- code_puppy/tools/ask_user_question/models.py +304 -0
- code_puppy/tools/ask_user_question/registration.py +26 -0
- code_puppy/tools/ask_user_question/renderers.py +309 -0
- code_puppy/tools/ask_user_question/terminal_ui.py +329 -0
- code_puppy/tools/ask_user_question/theme.py +155 -0
- code_puppy/tools/ask_user_question/tui_loop.py +423 -0
- code_puppy/tools/browser/__init__.py +37 -0
- code_puppy/tools/browser/browser_control.py +289 -0
- code_puppy/tools/browser/browser_interactions.py +545 -0
- code_puppy/tools/browser/browser_locators.py +640 -0
- code_puppy/tools/browser/browser_manager.py +378 -0
- code_puppy/tools/browser/browser_navigation.py +251 -0
- code_puppy/tools/browser/browser_screenshot.py +179 -0
- code_puppy/tools/browser/browser_scripts.py +462 -0
- code_puppy/tools/browser/browser_workflows.py +221 -0
- code_puppy/tools/browser/chromium_terminal_manager.py +259 -0
- code_puppy/tools/browser/terminal_command_tools.py +534 -0
- code_puppy/tools/browser/terminal_screenshot_tools.py +552 -0
- code_puppy/tools/browser/terminal_tools.py +525 -0
- code_puppy/tools/command_runner.py +1346 -0
- code_puppy/tools/common.py +1409 -0
- code_puppy/tools/display.py +84 -0
- code_puppy/tools/file_modifications.py +886 -0
- code_puppy/tools/file_operations.py +802 -0
- code_puppy/tools/scheduler_tools.py +412 -0
- code_puppy/tools/skills_tools.py +244 -0
- code_puppy/tools/subagent_context.py +158 -0
- code_puppy/tools/tools_content.py +51 -0
- code_puppy/tools/universal_constructor.py +889 -0
- code_puppy/uvx_detection.py +242 -0
- code_puppy/version_checker.py +82 -0
- codepp-0.0.437.dist-info/METADATA +766 -0
- codepp-0.0.437.dist-info/RECORD +288 -0
- codepp-0.0.437.dist-info/WHEEL +4 -0
- codepp-0.0.437.dist-info/entry_points.txt +3 -0
- codepp-0.0.437.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,2156 @@
|
|
|
1
|
+
"""Base agent configuration class for defining agent properties."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import dataclasses
|
|
5
|
+
import json
|
|
6
|
+
import math
|
|
7
|
+
import pathlib
|
|
8
|
+
import signal
|
|
9
|
+
import threading
|
|
10
|
+
import time
|
|
11
|
+
import traceback
|
|
12
|
+
import uuid
|
|
13
|
+
from abc import ABC, abstractmethod
|
|
14
|
+
from typing import (
|
|
15
|
+
Any,
|
|
16
|
+
Callable,
|
|
17
|
+
Dict,
|
|
18
|
+
List,
|
|
19
|
+
Optional,
|
|
20
|
+
Sequence,
|
|
21
|
+
Set,
|
|
22
|
+
Tuple,
|
|
23
|
+
Type,
|
|
24
|
+
Union,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
import mcp
|
|
28
|
+
import pydantic
|
|
29
|
+
import pydantic_ai.models
|
|
30
|
+
from dbos import DBOS, SetWorkflowID
|
|
31
|
+
from pydantic_ai import Agent as PydanticAgent
|
|
32
|
+
from pydantic_ai import (
|
|
33
|
+
BinaryContent,
|
|
34
|
+
DocumentUrl,
|
|
35
|
+
ImageUrl,
|
|
36
|
+
RunContext,
|
|
37
|
+
UsageLimitExceeded,
|
|
38
|
+
UsageLimits,
|
|
39
|
+
)
|
|
40
|
+
from pydantic_ai.durable_exec.dbos import DBOSAgent
|
|
41
|
+
from pydantic_ai.messages import (
|
|
42
|
+
ModelMessage,
|
|
43
|
+
ModelRequest,
|
|
44
|
+
ModelResponse,
|
|
45
|
+
TextPart,
|
|
46
|
+
ThinkingPart,
|
|
47
|
+
ToolCallPart,
|
|
48
|
+
ToolCallPartDelta,
|
|
49
|
+
ToolReturn,
|
|
50
|
+
ToolReturnPart,
|
|
51
|
+
)
|
|
52
|
+
from rich.text import Text
|
|
53
|
+
|
|
54
|
+
from code_puppy.agents.event_stream_handler import event_stream_handler
|
|
55
|
+
from code_puppy.callbacks import (
|
|
56
|
+
on_agent_run_end,
|
|
57
|
+
on_agent_run_start,
|
|
58
|
+
on_message_history_processor_end,
|
|
59
|
+
on_message_history_processor_start,
|
|
60
|
+
)
|
|
61
|
+
|
|
62
|
+
# Consolidated relative imports
|
|
63
|
+
from code_puppy.config import (
|
|
64
|
+
get_agent_pinned_model,
|
|
65
|
+
get_compaction_strategy,
|
|
66
|
+
get_compaction_threshold,
|
|
67
|
+
get_global_model_name,
|
|
68
|
+
get_message_limit,
|
|
69
|
+
get_protected_token_count,
|
|
70
|
+
get_use_dbos,
|
|
71
|
+
get_value,
|
|
72
|
+
)
|
|
73
|
+
from code_puppy.error_logging import log_error
|
|
74
|
+
from code_puppy.keymap import cancel_agent_uses_signal, get_cancel_agent_char_code
|
|
75
|
+
from code_puppy.mcp_ import get_mcp_manager
|
|
76
|
+
from code_puppy.messaging import (
|
|
77
|
+
emit_error,
|
|
78
|
+
emit_info,
|
|
79
|
+
emit_warning,
|
|
80
|
+
)
|
|
81
|
+
from code_puppy.messaging.spinner import (
|
|
82
|
+
SpinnerBase,
|
|
83
|
+
update_spinner_context,
|
|
84
|
+
)
|
|
85
|
+
from code_puppy.model_factory import ModelFactory, make_model_settings
|
|
86
|
+
from code_puppy.summarization_agent import run_summarization_sync, SummarizationError
|
|
87
|
+
from code_puppy.tools.agent_tools import _active_subagent_tasks
|
|
88
|
+
from code_puppy.tools.command_runner import (
|
|
89
|
+
is_awaiting_user_input,
|
|
90
|
+
)
|
|
91
|
+
|
|
92
|
+
# Global flag to track delayed compaction requests
_delayed_compaction_requested = False

# Presumably counts agent reloads; the mutation site is not visible in this
# chunk — TODO confirm against the rest of the module.
_reload_count = 0
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def _log_error_to_file(exc: Exception) -> Optional[str]:
|
|
99
|
+
"""Log detailed error information to ~/.code_puppy/error_logs/log_{timestamp}.txt.
|
|
100
|
+
|
|
101
|
+
Args:
|
|
102
|
+
exc: The exception to log.
|
|
103
|
+
|
|
104
|
+
Returns:
|
|
105
|
+
The path to the log file if successful, None otherwise.
|
|
106
|
+
"""
|
|
107
|
+
try:
|
|
108
|
+
from code_puppy.error_logging import get_logs_dir
|
|
109
|
+
|
|
110
|
+
error_logs_dir = pathlib.Path(get_logs_dir())
|
|
111
|
+
error_logs_dir.mkdir(parents=True, exist_ok=True)
|
|
112
|
+
|
|
113
|
+
timestamp = time.strftime("%Y%m%d_%H%M%S")
|
|
114
|
+
log_file = error_logs_dir / f"log_{timestamp}.txt"
|
|
115
|
+
|
|
116
|
+
with open(log_file, "w", encoding="utf-8") as f:
|
|
117
|
+
f.write(f"Timestamp: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")
|
|
118
|
+
f.write(f"Exception Type: {type(exc).__name__}\n")
|
|
119
|
+
f.write(f"Exception Message: {str(exc)}\n")
|
|
120
|
+
f.write(f"Exception Args: {exc.args}\n")
|
|
121
|
+
f.write("\n--- Full Traceback ---\n")
|
|
122
|
+
f.write(traceback.format_exc())
|
|
123
|
+
f.write("\n--- Exception Chain ---\n")
|
|
124
|
+
# Walk the exception chain for chained exceptions
|
|
125
|
+
current = exc
|
|
126
|
+
chain_depth = 0
|
|
127
|
+
while current is not None and chain_depth < 10:
|
|
128
|
+
f.write(
|
|
129
|
+
f"\n[Cause {chain_depth}] {type(current).__name__}: {current}\n"
|
|
130
|
+
)
|
|
131
|
+
f.write("".join(traceback.format_tb(current.__traceback__)))
|
|
132
|
+
current = (
|
|
133
|
+
current.__cause__ if current.__cause__ else current.__context__
|
|
134
|
+
)
|
|
135
|
+
chain_depth += 1
|
|
136
|
+
|
|
137
|
+
return str(log_file)
|
|
138
|
+
except Exception:
|
|
139
|
+
# Don't let logging errors break the main flow
|
|
140
|
+
return None
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
class BaseAgent(ABC):
|
|
144
|
+
"""Base class for all agent configurations."""
|
|
145
|
+
|
|
146
|
+
def __init__(self):
    """Initialize per-instance identity, message history, and caches."""
    # Unique per-instance UUID; combined with the agent name in get_identity().
    self.id = str(uuid.uuid4())
    self._message_history: List[Any] = []
    self._compacted_message_hashes: Set[str] = set()
    # Agent construction cache
    self._code_generation_agent = None
    self._last_model_name: Optional[str] = None
    # Puppy rules loaded lazily
    self._puppy_rules: Optional[str] = None
    # Annotation-only declaration (no value assigned here); presumably set
    # when a model is constructed elsewhere — TODO confirm.
    self.cur_model: pydantic_ai.models.Model
    # Cache for MCP tool definitions (for token estimation)
    # This is populated after the first successful run when MCP tools are retrieved
    self._mcp_tool_definitions_cache: List[Dict[str, Any]] = []
|
|
159
|
+
|
|
160
|
+
def get_identity(self) -> str:
    """Return a unique identity string for this agent instance.

    Combines the agent name with the first six characters of the instance
    UUID, e.g. ``python-programmer-a3f2b1``.
    """
    short_uuid = self.id[:6]
    return "-".join((self.name, short_uuid))
|
|
167
|
+
|
|
168
|
+
def get_identity_prompt(self) -> str:
    """Return the identity suffix that gets appended to system prompts.

    Tells the model its own ID so it can claim task ownership and
    coordinate with other agents.
    """
    identity = self.get_identity()
    pieces = [
        f"\n\nYour ID is `{identity}`. ",
        "Use this for any tasks which require identifying yourself ",
        "such as claiming task ownership or coordination with other agents.",
    ]
    return "".join(pieces)
|
|
179
|
+
|
|
180
|
+
def get_full_system_prompt(self) -> str:
    """Return the complete system prompt with the identity suffix appended.

    Wraps get_system_prompt() and adds the identity prompt, so subclasses
    never have to handle identity themselves.
    """
    base = self.get_system_prompt()
    suffix = self.get_identity_prompt()
    return base + suffix
|
|
190
|
+
|
|
191
|
+
@property
@abstractmethod
def name(self) -> str:
    """Unique identifier for the agent.

    Used as the key for per-agent configuration (see get_model_name) and
    combined with the instance UUID in get_identity().
    """
    pass
|
|
196
|
+
|
|
197
|
+
@property
@abstractmethod
def display_name(self) -> str:
    """Human-readable name for the agent (abstract; subclasses must override)."""
    pass
|
|
202
|
+
|
|
203
|
+
@property
@abstractmethod
def description(self) -> str:
    """Brief description of what this agent does (abstract; subclasses must override)."""
    pass
|
|
208
|
+
|
|
209
|
+
@abstractmethod
def get_system_prompt(self) -> str:
    """Get the system prompt for this agent.

    Subclasses return the base prompt only; the identity suffix is
    appended separately by get_full_system_prompt().
    """
    pass
|
|
213
|
+
|
|
214
|
+
@abstractmethod
def get_available_tools(self) -> List[str]:
    """Get list of tool names that this agent should have access to.

    Returns:
        List of tool names to register for this agent.
    """
    pass
|
|
222
|
+
|
|
223
|
+
def get_tools_config(self) -> Optional[Dict[str, Any]]:
    """Get tool configuration for this agent.

    Returns:
        Dict with tool configuration, or None (the default) to use the
        default tools.
    """
    return None
|
|
230
|
+
|
|
231
|
+
def get_user_prompt(self) -> Optional[str]:
    """Get custom user prompt for this agent.

    Returns:
        Custom prompt string, or None (the default) to use the default prompt.
    """
    return None
|
|
238
|
+
|
|
239
|
+
# Message history management methods
def get_message_history(self) -> List[Any]:
    """Get the message history for this agent.

    Returns:
        The internal list of messages (not a copy) — mutations by the
        caller are visible to the agent.
    """
    return self._message_history
|
|
247
|
+
|
|
248
|
+
def set_message_history(self, history: List[Any]) -> None:
    """Set the message history for this agent.

    The list is stored by reference; no copy is made.

    Args:
        history: List of messages to set as the conversation history.
    """
    self._message_history = history
|
|
255
|
+
|
|
256
|
+
def clear_message_history(self) -> None:
    """Clear the message history for this agent.

    Rebinds the history to a fresh list (external references to the old
    list are unaffected) and empties the compacted-hash set in place.
    """
    self._message_history = []
    self._compacted_message_hashes.clear()
|
|
260
|
+
|
|
261
|
+
def append_to_message_history(self, message: Any) -> None:
    """Append a message to this agent's history (in place).

    Args:
        message: Message to append to the conversation history.
    """
    self._message_history.append(message)
|
|
268
|
+
|
|
269
|
+
def extend_message_history(self, history: List[Any]) -> None:
    """Extend this agent's message history with multiple messages (in place).

    Args:
        history: List of messages to append to the conversation history.
    """
    self._message_history.extend(history)
|
|
276
|
+
|
|
277
|
+
def get_compacted_message_hashes(self) -> Set[str]:
    """Get the set of compacted message hashes for this agent.

    Returns:
        The internal set (not a copy) of hashes for messages that have
        been compacted/summarized.
    """
    return self._compacted_message_hashes
|
|
284
|
+
|
|
285
|
+
def add_compacted_message_hash(self, message_hash: str) -> None:
    """Add a message hash to the set of compacted message hashes.

    Args:
        message_hash: Hash of a message that has been compacted/summarized.
    """
    self._compacted_message_hashes.add(message_hash)
|
|
292
|
+
|
|
293
|
+
def get_model_name(self) -> Optional[str]:
    """Resolve the model name to use for this agent.

    Prefers a model explicitly pinned to this agent; falls back to the
    global default when no pin is configured (None or empty string).
    """
    pinned = get_agent_pinned_model(self.name)
    if pinned:
        return pinned
    return get_global_model_name()
|
|
303
|
+
|
|
304
|
+
def _clean_binaries(self, messages: List[ModelMessage]) -> List[ModelMessage]:
|
|
305
|
+
"""Remove BinaryContent items from message parts.
|
|
306
|
+
|
|
307
|
+
Note: This mutates the messages in-place by modifying part.content.
|
|
308
|
+
The return value is the same list for API consistency.
|
|
309
|
+
"""
|
|
310
|
+
for message in messages:
|
|
311
|
+
for part in message.parts:
|
|
312
|
+
if hasattr(part, "content") and isinstance(part.content, list):
|
|
313
|
+
part.content = [
|
|
314
|
+
item
|
|
315
|
+
for item in part.content
|
|
316
|
+
if not isinstance(item, BinaryContent)
|
|
317
|
+
]
|
|
318
|
+
return messages
|
|
319
|
+
|
|
320
|
+
def ensure_history_ends_with_request(
    self, messages: List[ModelMessage]
) -> List[ModelMessage]:
    """Drop trailing ModelResponse messages from a history.

    pydantic_ai requires processed message history to end with a
    ModelRequest; swapping models mid-conversation can leave a trailing
    ModelResponse from the previous model. The input list is defensively
    copied, so the caller's list is never mutated.

    Args:
        messages: List of messages to validate/fix.

    Returns:
        A new list with any trailing ModelResponse messages removed
        (possibly empty).
    """
    trimmed = list(messages)  # defensive copy
    while trimmed and isinstance(trimmed[-1], ModelResponse):
        trimmed.pop()
    return trimmed
|
|
347
|
+
|
|
348
|
+
# Message history processing methods (moved from state_management.py and message_history_processor.py)
def _stringify_part(self, part: Any) -> str:
    """Create a stable string representation for a message part.

    We deliberately ignore timestamps so identical content hashes the same even when
    emitted at different times. This prevents status updates from blowing up the
    history when they are repeated with new timestamps."""

    attributes: List[str] = [part.__class__.__name__]

    # Role/instructions help disambiguate parts that otherwise share content
    if hasattr(part, "role") and part.role:
        attributes.append(f"role={part.role}")
    if hasattr(part, "instructions") and part.instructions:
        attributes.append(f"instructions={part.instructions}")

    if hasattr(part, "tool_call_id") and part.tool_call_id:
        attributes.append(f"tool_call_id={part.tool_call_id}")

    if hasattr(part, "tool_name") and part.tool_name:
        attributes.append(f"tool_name={part.tool_name}")

    # Content is normalized by type so equal content yields equal strings.
    content = getattr(part, "content", None)
    if content is None:
        attributes.append("content=None")
    elif isinstance(content, str):
        attributes.append(f"content={content}")
    elif isinstance(content, pydantic.BaseModel):
        # sort_keys gives a canonical JSON form for hashing.
        attributes.append(
            f"content={json.dumps(content.model_dump(), sort_keys=True)}"
        )
    elif isinstance(content, dict):
        attributes.append(f"content={json.dumps(content, sort_keys=True)}")
    elif isinstance(content, list):
        # NOTE(review): list items that are neither str nor BinaryContent
        # are silently skipped — confirm that is intended.
        for item in content:
            if isinstance(item, str):
                attributes.append(f"content={item}")
            if isinstance(item, BinaryContent):
                # NOTE(review): hash() of bytes is salted per process
                # (PYTHONHASHSEED), so this is stable within a run but not
                # across runs — confirm cross-run stability is not required.
                attributes.append(f"BinaryContent={hash(item.data)}")
    else:
        attributes.append(f"content={repr(content)}")
    result = "|".join(attributes)
    return result
|
|
391
|
+
|
|
392
|
+
def hash_message(self, message: Any) -> int:
    """Create a stable hash for a model message that ignores timestamps."""
    pieces: List[str] = []
    role = getattr(message, "role", None)
    instructions = getattr(message, "instructions", None)
    if role:
        pieces.append(f"role={role}")
    if instructions:
        pieces.append(f"instructions={instructions}")
    # Each part contributes its canonical string form (timestamp-free).
    for part in getattr(message, "parts", []):
        pieces.append(self._stringify_part(part))
    return hash("||".join(pieces))
|
|
407
|
+
|
|
408
|
+
def stringify_message_part(self, part) -> str:
    """
    Convert a message part to a string representation for token estimation or other uses.

    Args:
        part: A message part that may contain content or be a tool call

    Returns:
        String representation of the message part
    """
    # Start with a "kind: " prefix so content-less parts are still labeled.
    result = ""
    if hasattr(part, "part_kind"):
        result += part.part_kind + ": "
    else:
        result += str(type(part)) + ": "

    # Handle content
    # NOTE(review): every branch below *reassigns* result, discarding the
    # prefix built above whenever content is present — looks intentional for
    # token estimation, but confirm.
    if hasattr(part, "content") and part.content:
        # Handle different content types
        if isinstance(part.content, str):
            result = part.content
        elif isinstance(part.content, pydantic.BaseModel):
            result = json.dumps(part.content.model_dump())
        elif isinstance(part.content, dict):
            result = json.dumps(part.content)
        elif isinstance(part.content, list):
            result = ""
            # NOTE(review): list items that are neither str nor
            # BinaryContent contribute nothing — confirm intended.
            for item in part.content:
                if isinstance(item, str):
                    result += item + "\n"
                if isinstance(item, BinaryContent):
                    result += f"BinaryContent={hash(item.data)}\n"
        else:
            result = str(part.content)

    # Handle tool calls which may have additional token costs
    # If part also has content, we'll process tool calls separately
    if hasattr(part, "tool_name") and part.tool_name:
        # Estimate tokens for tool name and parameters
        tool_text = part.tool_name
        if hasattr(part, "args"):
            tool_text += f" {str(part.args)}"
        result += tool_text

    return result
|
|
453
|
+
|
|
454
|
+
def estimate_token_count(self, text: str) -> int:
    """
    Estimate tokens as ``len(text) / 2.5``, floored, never less than 1.

    A deliberately cheap stand-in for a real tokenizer (tiktoken); good
    enough for compaction-threshold decisions.
    """
    approx = math.floor(len(text) / 2.5)
    return approx if approx > 1 else 1
|
|
460
|
+
|
|
461
|
+
def estimate_tokens_for_message(self, message: ModelMessage) -> int:
    """
    Estimate the token footprint of a message by summing over its parts.

    Fast length-based replacement for tiktoken; always reports at least
    one token so an empty message still counts.
    """
    part_texts = (self.stringify_message_part(p) for p in message.parts)
    total = sum(self.estimate_token_count(text) for text in part_texts if text)
    return total if total > 1 else 1
|
|
474
|
+
|
|
475
|
+
def estimate_context_overhead_tokens(self) -> int:
    """
    Estimate the token overhead from system prompt and tool definitions.

    This accounts for tokens that are always present in the context:
    - System prompt (for non-Claude-Code models)
    - Tool definitions (name, description, parameter schema)
    - MCP tool definitions

    Note: For Claude Code models, the system prompt is prepended to the first
    user message, so it's already counted in the message history tokens.
    We only count the short fixed instructions for Claude Code models.

    Returns:
        Estimated overhead token count; every step is best-effort, so
        unavailable pieces simply contribute zero.
    """
    total_tokens = 0

    # 1. Estimate tokens for system prompt / instructions
    # Use prepare_prompt_for_model() to get the correct instructions for token counting.
    # For models that prepend system prompt to user message (claude-code, antigravity),
    # this returns the short fixed instructions. For other models, returns full prompt.
    try:
        from code_puppy.model_utils import prepare_prompt_for_model

        model_name = (
            self.get_model_name() if hasattr(self, "get_model_name") else ""
        )
        system_prompt = self.get_full_system_prompt()

        # Get the instructions that will be used (handles model-specific logic via hooks)
        prepared = prepare_prompt_for_model(
            model_name=model_name,
            system_prompt=system_prompt,
            user_prompt="",  # Empty - we just need the instructions
            prepend_system_to_user=False,  # Don't modify prompt, just get instructions
        )

        if prepared.instructions:
            total_tokens += self.estimate_token_count(prepared.instructions)
    except Exception:
        pass  # If we can't get system prompt, skip it

    # 2. Estimate tokens for pydantic_agent tool definitions
    # NOTE(review): `_tools` is a private pydantic-ai attribute — confirm it
    # still exists when upgrading pydantic-ai.
    pydantic_agent = getattr(self, "pydantic_agent", None)
    if pydantic_agent:
        tools = getattr(pydantic_agent, "_tools", None)
        if tools and isinstance(tools, dict):
            for tool_name, tool_func in tools.items():
                try:
                    # Estimate tokens from tool name
                    total_tokens += self.estimate_token_count(tool_name)

                    # Estimate tokens from tool description
                    description = getattr(tool_func, "__doc__", None) or ""
                    if description:
                        total_tokens += self.estimate_token_count(description)

                    # Estimate tokens from parameter schema
                    # Tools may have a schema attribute or we can try to get it from annotations
                    schema = getattr(tool_func, "schema", None)
                    if schema:
                        schema_str = (
                            json.dumps(schema)
                            if isinstance(schema, dict)
                            else str(schema)
                        )
                        total_tokens += self.estimate_token_count(schema_str)
                    else:
                        # Try to get schema from function annotations
                        annotations = getattr(tool_func, "__annotations__", None)
                        if annotations:
                            total_tokens += self.estimate_token_count(
                                str(annotations)
                            )
                except Exception:
                    continue  # Skip tools we can't process

    # 3. Estimate tokens for MCP tool definitions from cache
    # MCP tools are fetched asynchronously, so we use a cache that's populated
    # after the first successful run. See _update_mcp_tool_cache() method.
    mcp_tool_cache = getattr(self, "_mcp_tool_definitions_cache", [])
    if mcp_tool_cache:
        for tool_def in mcp_tool_cache:
            try:
                # Estimate tokens from tool name
                tool_name = tool_def.get("name", "")
                if tool_name:
                    total_tokens += self.estimate_token_count(tool_name)

                # Estimate tokens from tool description
                description = tool_def.get("description", "")
                if description:
                    total_tokens += self.estimate_token_count(description)

                # Estimate tokens from parameter schema (inputSchema)
                input_schema = tool_def.get("inputSchema")
                if input_schema:
                    schema_str = (
                        json.dumps(input_schema)
                        if isinstance(input_schema, dict)
                        else str(input_schema)
                    )
                    total_tokens += self.estimate_token_count(schema_str)
            except Exception:
                continue  # Skip tools we can't process

    return total_tokens
|
|
580
|
+
|
|
581
|
+
async def _update_mcp_tool_cache(self) -> None:
    """
    Update the MCP tool definitions cache by fetching tools from running MCP servers.

    This should be called after a successful run to populate the cache for
    accurate token estimation in subsequent runs.

    Reads ``self._mcp_servers`` (if any) and stores plain dicts with
    ``name`` / ``description`` / ``inputSchema`` keys into
    ``self._mcp_tool_definitions_cache``. Servers that are unreachable or
    raise while listing tools are skipped (best effort).
    """
    mcp_servers = getattr(self, "_mcp_servers", None)
    if not mcp_servers:
        return

    tool_definitions = []
    for mcp_server in mcp_servers:
        try:
            # Check if the server has list_tools method (pydantic-ai MCP servers)
            if hasattr(mcp_server, "list_tools"):
                # list_tools() returns list[mcp_types.Tool]
                tools = await mcp_server.list_tools()
                for tool in tools:
                    # Normalize to plain dicts so estimate_context_overhead_tokens
                    # can consume them without knowing the MCP types.
                    tool_def = {
                        "name": getattr(tool, "name", ""),
                        "description": getattr(tool, "description", ""),
                        "inputSchema": getattr(tool, "inputSchema", {}),
                    }
                    tool_definitions.append(tool_def)
        except Exception:
            # Server might not be running or accessible, skip it
            continue

    self._mcp_tool_definitions_cache = tool_definitions
|
|
611
|
+
|
|
612
|
+
def update_mcp_tool_cache_sync(self) -> None:
    """
    Synchronously clear the MCP tool cache.

    Clearing (rather than refetching) is deliberate: MCP servers require
    async context management that is awkward to drive from synchronous
    code, so we just drop the cached definitions and let the next
    successful agent run repopulate them via _update_mcp_tool_cache().
    Call this after starting/stopping MCP servers so token estimates do
    not use stale tool definitions.
    """
    self._mcp_tool_definitions_cache = []
|
|
626
|
+
|
|
627
|
+
def _is_tool_call_part(self, part: Any) -> bool:
    """
    Return True when ``part`` looks like a tool-call part.

    Accepts real ToolCallPart/ToolCallPartDelta instances, anything whose
    ``part_kind`` normalizes to "tool-call", and duck-typed objects that
    carry a tool name plus args (or a streamed args delta).
    """
    if isinstance(part, (ToolCallPart, ToolCallPartDelta)):
        return True

    kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
    if kind == "tool-call":
        return True

    # Duck-typing fallback: a named tool with (possibly partial) arguments.
    if getattr(part, "tool_name", None) is None:
        return False
    return (
        getattr(part, "args", None) is not None
        or getattr(part, "args_delta", None) is not None
    )
|
|
640
|
+
|
|
641
|
+
def _is_tool_return_part(self, part: Any) -> bool:
    """
    Return True when ``part`` looks like a tool-return/result part.

    Accepts real ToolReturnPart/ToolReturn instances, anything whose
    ``part_kind`` normalizes to "tool-return"/"tool-result", and
    duck-typed objects carrying a tool_call_id plus content (or a
    streamed content delta).
    """
    if isinstance(part, (ToolReturnPart, ToolReturn)):
        return True

    kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
    if kind in ("tool-return", "tool-result"):
        return True

    # Duck-typing fallback: must reference a call id and carry content.
    if getattr(part, "tool_call_id", None) is None:
        return False
    return (
        getattr(part, "content", None) is not None
        or getattr(part, "content_delta", None) is not None
    )
|
|
655
|
+
|
|
656
|
+
def filter_huge_messages(self, messages: List[ModelMessage]) -> List[ModelMessage]:
    """
    Drop messages estimated at 50,000+ tokens, then prune orphaned tool calls.

    Removing a message can orphan its tool-call/return partner, so the
    survivors are always run through prune_interrupted_tool_calls().
    """
    survivors = [
        msg for msg in messages if self.estimate_tokens_for_message(msg) < 50000
    ]
    return self.prune_interrupted_tool_calls(survivors)
|
|
660
|
+
|
|
661
|
+
def _find_safe_split_index(
    self, messages: List[ModelMessage], initial_split_idx: int
) -> int:
    """
    Adjust split index to avoid breaking tool_use/tool_result pairs.

    Ensures that if a tool_result is in the protected zone, its corresponding
    tool_use is also included. Otherwise the LLM will error with
    'tool_use ids found without tool_result blocks'.

    Args:
        messages: Full message list
        initial_split_idx: The initial split point (messages before this go to summarize)

    Returns:
        Adjusted split index that doesn't break tool pairs; always <= the
        initial index and never 0, so the system message stays protected.
    """
    if initial_split_idx <= 1:
        return initial_split_idx

    # Collect tool_call_ids from messages AFTER the split (protected zone)
    protected_tool_return_ids: Set[str] = set()
    for msg in messages[initial_split_idx:]:
        for part in getattr(msg, "parts", []) or []:
            if getattr(part, "part_kind", None) == "tool-return":
                tool_call_id = getattr(part, "tool_call_id", None)
                if tool_call_id:
                    protected_tool_return_ids.add(tool_call_id)

    if not protected_tool_return_ids:
        return initial_split_idx

    # Scan backwards from split point to find any tool_uses that match protected returns
    adjusted_idx = initial_split_idx
    for i in range(
        initial_split_idx - 1, 0, -1
    ):  # Don't include system message at 0
        msg = messages[i]
        has_matching_tool_use = False
        for part in getattr(msg, "parts", []) or []:
            if getattr(part, "part_kind", None) == "tool-call":
                tool_call_id = getattr(part, "tool_call_id", None)
                if tool_call_id and tool_call_id in protected_tool_return_ids:
                    has_matching_tool_use = True
                    break

        if has_matching_tool_use:
            # This message has a tool_use whose return is in protected zone
            # Move the split point back to include this message in protected zone
            adjusted_idx = i
        else:
            # Once we find a message without matching tool_use, we can stop
            # (tool calls and returns should be adjacent)
            break

    return adjusted_idx
|
|
717
|
+
|
|
718
|
+
def split_messages_for_protected_summarization(
    self,
    messages: List[ModelMessage],
) -> Tuple[List[ModelMessage], List[ModelMessage]]:
    """
    Split messages into two groups: messages to summarize and protected recent messages.

    Returns:
        Tuple of (messages_to_summarize, protected_messages)

    The protected_messages are the system message (always) plus the most
    recent messages whose estimated tokens fit within the configured
    protected token budget. All other messages will be summarized. The
    split point is adjusted via _find_safe_split_index() so that a
    tool-call message is never separated from its tool-return partner;
    together the two returned lists always cover every input message
    exactly once.
    """
    if len(messages) <= 1:  # Just system message or empty
        return [], messages

    # Always protect the system message; its tokens count against the budget.
    system_message = messages[0]
    protected_token_count = self.estimate_tokens_for_message(system_message)

    # Get the configured protected token count
    protected_tokens_limit = get_protected_token_count()

    # Walk backwards from the newest message, growing the protected tail
    # until the budget is exhausted (system message excluded from the walk).
    tail_count = 0
    for i in range(len(messages) - 1, 0, -1):  # Stop at 1 (skip system message)
        message_tokens = self.estimate_tokens_for_message(messages[i])
        if protected_token_count + message_tokens > protected_tokens_limit:
            break
        tail_count += 1
        protected_token_count += message_tokens

    protected_start_idx = max(1, len(messages) - tail_count)

    # IMPORTANT: Adjust split point to avoid breaking tool_use/tool_result pairs
    # The LLM requires every tool_use to have its tool_result immediately after
    protected_start_idx = self._find_safe_split_index(messages, protected_start_idx)

    # BUG FIX: rebuild the protected list from the (possibly adjusted) index.
    # Previously the pre-adjustment list was returned, so messages pulled into
    # the protected zone by _find_safe_split_index were silently dropped from
    # both returned lists.
    protected_messages = [system_message] + list(messages[protected_start_idx:])
    messages_to_summarize = messages[1:protected_start_idx]

    # Emit info messages
    emit_info(
        f"🔒 Protecting {len(protected_messages)} recent messages ({protected_token_count} tokens, limit: {protected_tokens_limit})"
    )
    emit_info(f"📝 Summarizing {len(messages_to_summarize)} older messages")

    return messages_to_summarize, protected_messages
|
|
785
|
+
|
|
786
|
+
def summarize_messages(
    self, messages: List[ModelMessage], with_protection: bool = True
) -> Tuple[List[ModelMessage], List[ModelMessage]]:
    """
    Summarize messages while protecting recent messages up to PROTECTED_TOKENS.

    Args:
        messages: Full message history (system message first).
        with_protection: When True, a recent tail of messages is kept out of
            the summary (see split_messages_for_protected_summarization);
            when False, everything after the system message is summarized.

    Returns:
        Tuple of (compacted_messages, summarized_source_messages)
        where compacted_messages always preserves the original system message
        as the first entry.

    On any summarization failure the original messages are returned unchanged
    (with an empty summarized list), so a failed compaction never loses history.
    """
    messages_to_summarize: List[ModelMessage]
    protected_messages: List[ModelMessage]

    if with_protection:
        messages_to_summarize, protected_messages = (
            self.split_messages_for_protected_summarization(messages)
        )
    else:
        messages_to_summarize = messages[1:] if messages else []
        protected_messages = messages[:1]

    if not messages:
        return [], []

    system_message = messages[0]

    if not messages_to_summarize:
        # Nothing to summarize, so just return the original sequence
        return self.prune_interrupted_tool_calls(messages), []

    instructions = (
        "The input will be a log of Agentic AI steps that have been taken"
        " as well as user queries, etc. Summarize the contents of these steps."
        " The high level details should remain but the bulk of the content from tool-call"
        " responses should be compacted and summarized. For example if you see a tool-call"
        " reading a file, and the file contents are large, then in your summary you might just"
        " write: * used read_file on space_invaders.cpp - contents removed."
        "\n Make sure your result is a bulleted list of all steps and interactions."
        "\n\nNOTE: This summary represents older conversation history. Recent messages are preserved separately."
    )

    try:
        # Prune any orphaned tool calls from messages before sending to LLM
        # The LLM requires every tool_use to have a matching tool_result
        pruned_messages_to_summarize = self.prune_interrupted_tool_calls(
            messages_to_summarize
        )

        if not pruned_messages_to_summarize:
            # After pruning, nothing left to summarize
            return self.prune_interrupted_tool_calls(messages), []

        # Module-level helper (not self.run_summarization_sync): runs the
        # summarization agent synchronously over the pruned slice.
        new_messages = run_summarization_sync(
            instructions, message_history=pruned_messages_to_summarize
        )

        if not isinstance(new_messages, list):
            emit_warning(
                "Summarization agent returned non-list output; wrapping into message request"
            )
            new_messages = [ModelRequest([TextPart(str(new_messages))])]

        compacted: List[ModelMessage] = [system_message] + list(new_messages)

        # Drop the system message from protected_messages because we already included it
        protected_tail = [
            msg for msg in protected_messages if msg is not system_message
        ]

        compacted.extend(protected_tail)

        return self.prune_interrupted_tool_calls(compacted), messages_to_summarize
    except SummarizationError as e:
        # SummarizationError has detailed error info
        emit_error(f"Summarization failed: {e}")
        if e.original_error:
            emit_warning(
                f"💡 Tip: Underlying error was {type(e.original_error).__name__}. "
                "Consider using '/set compaction_strategy=truncation' as a fallback."
            )
        return messages, []  # Return original messages on failure
    except Exception as e:
        # Catch-all for unexpected errors
        error_type = type(e).__name__
        error_msg = str(e) if str(e) else "(no error details)"
        emit_error(
            f"Unexpected error during compaction: [{error_type}] {error_msg}"
        )
        return messages, []  # Return original messages on failure
|
|
876
|
+
|
|
877
|
+
def get_model_context_length(self) -> int:
    """
    Return the context window size (tokens) for this agent's effective model.

    Honors a per-agent pinned model via get_model_name() and falls back
    to a conservative 128000 whenever the model or its config cannot be
    resolved, so status display and compaction never blow up.
    """
    fallback = 128000
    try:
        configs = ModelFactory.load_config()
        # get_model_name() respects /pin_model before the global default.
        entry = configs.get(self.get_model_name(), {})
        return int(entry.get("context_length", fallback))
    except Exception:
        # Be safe; a failed lookup must not break status or compaction.
        return fallback
|
|
894
|
+
|
|
895
|
+
def has_pending_tool_calls(self, messages: List[ModelMessage]) -> bool:
    """
    Check if there are any pending tool calls in the message history.

    A pending tool call is one that has a ToolCallPart without a corresponding
    ToolReturnPart. This indicates the model is still waiting for tool execution.

    Delegates to get_pending_tool_call_count() so the call/return matching
    logic lives in exactly one place (previously duplicated verbatim here).

    Returns:
        True if there are pending tool calls, False otherwise
    """
    return self.get_pending_tool_call_count(messages) > 0
|
|
926
|
+
|
|
927
|
+
def request_delayed_compaction(self) -> None:
    """
    Request that compaction be attempted after the current tool calls complete.

    Flips a module-level one-shot flag that the next message-processing
    cycle checks (see should_attempt_delayed_compaction), so compaction
    only runs once no tool calls are in flight.
    """
    global _delayed_compaction_requested

    _delayed_compaction_requested = True
    emit_info(
        "🔄 Delayed compaction requested - will attempt after tool calls complete",
        message_group="token_context_status",
    )
|
|
940
|
+
|
|
941
|
+
def should_attempt_delayed_compaction(self) -> bool:
    """
    Check whether a requested delayed compaction can proceed now.

    Returns:
        True only when a delayed compaction was requested AND no tool calls
        are still pending; in that case the request flag is consumed.
    """
    global _delayed_compaction_requested

    if not _delayed_compaction_requested:
        return False

    # Only proceed once every tool call has a matching return.
    if self.has_pending_tool_calls(self.get_message_history()):
        return False

    _delayed_compaction_requested = False  # consume the one-shot request
    return True
|
|
959
|
+
|
|
960
|
+
def get_pending_tool_call_count(self, messages: List[ModelMessage]) -> int:
    """
    Count tool calls with no matching tool return yet (debugging aid).

    Returns:
        Number of tool calls still waiting for execution results.
    """
    if not messages:
        return 0

    call_ids: Set[str] = set()
    return_ids: Set[str] = set()

    for message in messages:
        for part in getattr(message, "parts", []) or []:
            call_id = getattr(part, "tool_call_id", None)
            if not call_id:
                continue
            if part.part_kind == "tool-call":
                call_ids.add(call_id)
            elif part.part_kind == "tool-return":
                return_ids.add(call_id)

    # Pending = calls that never got a return.
    return len(call_ids - return_ids)
|
|
986
|
+
|
|
987
|
+
def prune_interrupted_tool_calls(
    self, messages: List[ModelMessage]
) -> List[ModelMessage]:
    """
    Remove any messages that participate in mismatched tool call sequences.

    A mismatched tool call id is one that appears in a ToolCall (model/tool
    request) without a corresponding tool return, or vice versa. Original
    order is preserved; only messages containing parts that reference a
    mismatched tool_call_id are dropped. (Also removes a `dropped_count`
    local that was computed but never used.)
    """
    if not messages:
        return messages

    tool_call_ids: Set[str] = set()
    tool_return_ids: Set[str] = set()

    # First pass: collect ids for calls vs returns.
    # Heuristic: an explicit "tool-call" part_kind is a call; any other part
    # carrying a tool_call_id is treated as a return/result.
    for msg in messages:
        for part in getattr(msg, "parts", []) or []:
            tool_call_id = getattr(part, "tool_call_id", None)
            if not tool_call_id:
                continue
            if part.part_kind == "tool-call":
                tool_call_ids.add(tool_call_id)
            else:
                tool_return_ids.add(tool_call_id)

    mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
    if not mismatched:
        return messages

    # Second pass: drop every message that touches a mismatched id.
    return [
        msg
        for msg in messages
        if not any(
            getattr(part, "tool_call_id", None) in mismatched
            for part in getattr(msg, "parts", []) or []
        )
    ]
|
|
1034
|
+
|
|
1035
|
+
def message_history_processor(
    self, ctx: RunContext, messages: List[ModelMessage]
) -> List[ModelMessage]:
    """
    pydantic-ai history-processor hook: report context usage and compact.

    Estimates current usage (message tokens plus system-prompt/tool-definition
    overhead), updates the spinner's context display, and — when usage crosses
    the configured compaction threshold — compacts the history using the
    configured strategy ("truncation" or summarization).

    Args:
        ctx: The pydantic-ai run context (required by the hook signature).
        messages: Current message history.

    Returns:
        The message list to actually send to the model (always a plain list).
    """
    model_max = self.get_model_context_length()

    # Current usage: per-message estimates plus the always-present overhead
    # (system prompt + tool definitions).
    message_tokens = sum(self.estimate_tokens_for_message(msg) for msg in messages)
    context_overhead = self.estimate_context_overhead_tokens()
    total_current_tokens = message_tokens + context_overhead
    proportion_used = total_current_tokens / model_max

    context_summary = SpinnerBase.format_context_info(
        total_current_tokens, model_max, proportion_used
    )
    update_spinner_context(context_summary)

    # Get the configured compaction threshold
    compaction_threshold = get_compaction_threshold()

    # Get the configured compaction strategy
    compaction_strategy = get_compaction_strategy()

    if proportion_used > compaction_threshold:
        # RACE CONDITION PROTECTION: summarizing while tool calls are still
        # in flight would orphan tool_use blocks, so defer compaction.
        if compaction_strategy == "summarization" and self.has_pending_tool_calls(
            messages
        ):
            pending_count = self.get_pending_tool_call_count(messages)
            emit_warning(
                f"⚠️ Summarization deferred: {pending_count} pending tool call(s) detected. "
                "Waiting for tool execution to complete before compaction.",
                message_group="token_context_status",
            )
            # Request delayed compaction for when tool calls complete
            self.request_delayed_compaction()
            # Return original messages without compaction.
            # BUG FIX: previously `return messages, []` returned a tuple here,
            # violating the declared List[ModelMessage] contract of this hook.
            return messages

        if compaction_strategy == "truncation":
            # Use truncation instead of summarization
            protected_tokens = get_protected_token_count()
            filtered_messages = self.filter_huge_messages(messages)
            result_messages = self.truncation(filtered_messages, protected_tokens)
            # Track dropped messages by hash so message_history_accumulator
            # won't re-inject them from pydantic-ai's full message list on
            # subsequent calls within the same run (fixes ghost-task bug).
            result_hashes = {self.hash_message(m) for m in result_messages}
            summarized_messages = [
                m
                for m in filtered_messages
                if self.hash_message(m) not in result_hashes
            ]
        else:
            # Default to summarization (safe to proceed - no pending tool calls)
            result_messages, summarized_messages = self.summarize_messages(
                self.filter_huge_messages(messages)
            )

        final_token_count = sum(
            self.estimate_tokens_for_message(msg) for msg in result_messages
        )
        # Update spinner with final token count
        final_summary = SpinnerBase.format_context_info(
            final_token_count, model_max, final_token_count / model_max
        )
        update_spinner_context(final_summary)

        self.set_message_history(result_messages)
        # Remember what was compacted so it is not re-added later in the run.
        for m in summarized_messages:
            self.add_compacted_message_hash(self.hash_message(m))
        return result_messages
    return messages
|
|
1107
|
+
|
|
1108
|
+
def truncation(
    self, messages: List[ModelMessage], protected_tokens: int
) -> List[ModelMessage]:
    """
    Truncate message history to manage token usage.

    Always keeps the first message (system prompt) and, when the second
    message carries a ThinkingPart, keeps it too so extended-thinking
    context is not lost. After those, only the most recent messages whose
    cumulative estimated size fits within ``protected_tokens`` survive.

    Args:
        messages: List of messages to truncate
        protected_tokens: Token budget for the recent-message window

    Returns:
        Truncated, chronologically ordered list with orphaned tool calls pruned.
    """
    emit_info("Truncating message history to manage token usage")

    kept = [messages[0]]  # system prompt is always protected

    # Protect the second message when it contains extended-thinking output.
    protected_head = 1
    if len(messages) > 1 and any(
        isinstance(part, ThinkingPart) for part in messages[1].parts
    ):
        kept.append(messages[1])
        protected_head = 2

    # Walk the remaining messages newest-first, keeping them until the
    # protected token budget is exhausted.
    recent: List[ModelMessage] = []
    budget_used = 0
    for candidate in reversed(messages[protected_head:]):
        budget_used += self.estimate_tokens_for_message(candidate)
        if budget_used > protected_tokens:
            break
        recent.append(candidate)

    # `recent` was collected newest-first; restore chronological order.
    kept.extend(reversed(recent))

    return self.prune_interrupted_tool_calls(kept)
|
|
1165
|
+
|
|
1166
|
+
def run_summarization_sync(
    self,
    instructions: str,
    message_history: List[ModelMessage],
) -> Union[List[ModelMessage], str]:
    """
    Run summarization synchronously using the configured summarization agent.

    This is a thin delegation to the module-level ``run_summarization_sync``
    helper; it is exposed as a method so it can be overridden by subclasses
    if needed. ``self`` is intentionally unused in the default implementation.

    Args:
        instructions: Instructions for the summarization agent
        message_history: List of messages to summarize

    Returns:
        Summarized messages or text
    """
    # Delegates to the module-level function of the same name.
    return run_summarization_sync(instructions, message_history)
|
|
1183
|
+
|
|
1184
|
+
# ===== Agent wiring formerly in code_puppy/agent.py =====
|
|
1185
|
+
def load_puppy_rules(self) -> Optional[str]:
    """Load AGENT(S).md rules from the global config dir and the project dir.

    Checks for AGENTS.md/AGENT.md/agents.md/agent.md (in that order) in:

    1. The global config directory (``CONFIG_DIR``)
    2. The current working directory (project-specific)

    When both exist they are concatenated, global rules first, so
    project-specific rules can override or extend the global ones.
    The result is cached on ``self._puppy_rules``.
    """
    # Fast path: a previous call already resolved the rules.
    if self._puppy_rules is not None:
        return self._puppy_rules
    from pathlib import Path

    from code_puppy.config import CONFIG_DIR

    candidate_names = ("AGENTS.md", "AGENT.md", "agents.md", "agent.md")

    def first_existing_text(paths):
        # Return the contents of the first path that exists, else None.
        # utf-8-sig tolerates files saved with a BOM.
        for candidate in paths:
            if candidate.exists():
                return candidate.read_text(encoding="utf-8-sig")
        return None

    global_rules = first_existing_text(
        Path(CONFIG_DIR) / name for name in candidate_names
    )
    project_rules = first_existing_text(
        Path(name) for name in candidate_names
    )

    # Global rules come first, project rules second (project overrides).
    collected = [text for text in (global_rules, project_rules) if text]
    self._puppy_rules = "\n\n".join(collected) if collected else None
    return self._puppy_rules
|
|
1224
|
+
|
|
1225
|
+
def load_mcp_servers(self, extra_headers: Optional[Dict[str, str]] = None):
    """Load MCP servers through the manager for use by pydantic-ai.

    The manager syncs from mcp_servers.json during its own initialization,
    so no explicit sync happens here; use reload_mcp_servers() to force one.

    Args:
        extra_headers: Accepted for interface compatibility.
            NOTE(review): currently unused by this implementation — confirm
            whether callers expect it to be forwarded to the manager.

    Returns:
        A list of MCP server toolsets, or [] when MCP is disabled in config.
    """
    disabled_setting = get_value("disable_mcp_servers")
    truthy_flags = ("1", "true", "yes", "on")
    if disabled_setting and str(disabled_setting).lower() in truthy_flags:
        # MCP support explicitly switched off via configuration.
        return []

    return get_mcp_manager().get_servers_for_agent()
|
|
1238
|
+
|
|
1239
|
+
def reload_mcp_servers(self):
    """Force a re-sync of MCP servers and return the updated servers.

    Re-reads mcp_servers.json so any configuration changes made since the
    last sync are picked up, and clears the cached MCP tool definitions so
    stale tool metadata is not reused afterwards.
    """
    # Invalidate cached tool definitions; they are rebuilt on demand.
    self._mcp_tool_definitions_cache = []

    manager = get_mcp_manager()
    # Pick up edits made to mcp_servers.json since the last sync.
    manager.sync_from_config()
    return manager.get_servers_for_agent()
|
|
1252
|
+
|
|
1253
|
+
def _load_model_with_fallback(
    self,
    requested_model_name: str,
    models_config: Dict[str, Any],
    message_group: str,
) -> Tuple[Any, str]:
    """Load the requested model, applying a friendly fallback when unavailable.

    Tries the requested model first. On failure, warns the user and walks a
    fallback list — the globally configured model first, then every other
    configured model — until one loads successfully.

    Args:
        requested_model_name: Name of the model the caller asked for.
        models_config: Mapping of model name -> model configuration.
        message_group: Group id used when emitting UI messages.

    Returns:
        Tuple of (loaded model, name of the model actually loaded).

    Raises:
        ValueError: When neither the requested model nor any fallback loads.
    """
    try:
        return (
            ModelFactory.get_model(requested_model_name, models_config),
            requested_model_name,
        )
    except ValueError as exc:
        configured_names = list(models_config.keys())
        listing = (
            ", ".join(sorted(configured_names))
            if configured_names
            else "no configured models"
        )
        emit_warning(
            (
                f"Model '{requested_model_name}' not found. "
                f"Available models: {listing}"
            ),
            message_group=message_group,
        )

        # Prefer the globally configured model, then every other configured
        # model, preserving order and skipping duplicates.
        candidates: List[str] = []
        preferred = get_global_model_name()
        if preferred:
            candidates.append(preferred)
        for name in configured_names:
            if name not in candidates:
                candidates.append(name)

        for candidate_name in candidates:
            # Skip empty entries and the model that already failed.
            if not candidate_name or candidate_name == requested_model_name:
                continue
            try:
                loaded = ModelFactory.get_model(candidate_name, models_config)
                emit_info(
                    f"Using fallback model: {candidate_name}",
                    message_group=message_group,
                )
                return loaded, candidate_name
            except ValueError:
                continue

        friendly_message = (
            "No valid model could be loaded. Update the model configuration or set "
            "a valid model with `config set`."
        )
        emit_error(
            friendly_message,
            message_group=message_group,
        )
        raise ValueError(friendly_message) from exc
|
|
1309
|
+
|
|
1310
|
+
def reload_code_generation_agent(self, message_group: Optional[str] = None):
    """Force-reload the pydantic-ai Agent based on current config and model.

    Rebuilds the underlying agent from the current model configuration,
    system prompt, puppy rules, and MCP servers. When DBOS is enabled the
    agent is wrapped in a DBOSAgent and MCP servers are kept aside (they
    are attached at run time in run_with_mcp instead of construction time).

    Args:
        message_group: Optional UI message-group id; a fresh UUID is
            generated when omitted.

    Returns:
        The newly constructed agent (PydanticAgent, or its DBOSAgent wrapper).
    """
    from code_puppy.tools import (
        EXTENDED_THINKING_PROMPT_NOTE,
        has_extended_thinking_active,
        register_tools_for_agent,
    )

    # Invalidate the project-local rules cache so a fresh read from the
    # current working directory is performed on the next load_puppy_rules()
    # call. This is critical for /cd: the user may have switched to a
    # different project that has its own AGENT.md (or none at all).
    self._puppy_rules = None

    if message_group is None:
        message_group = str(uuid.uuid4())

    model_name = self.get_model_name()

    models_config = ModelFactory.load_config()
    model, resolved_model_name = self._load_model_with_fallback(
        model_name,
        models_config,
        message_group,
    )

    # Assemble the instruction text: full system prompt + optional rules.
    instructions = self.get_full_system_prompt()
    puppy_rules = self.load_puppy_rules()
    if puppy_rules:
        instructions += f"\n{puppy_rules}"

    mcp_servers = self.load_mcp_servers()

    model_settings = make_model_settings(resolved_model_name)

    # Handle claude-code models: swap instructions (prompt prepending happens in run_with_mcp)
    from code_puppy.model_utils import prepare_prompt_for_model

    # When extended thinking is active, nudge the model to think between
    # tool calls (the share_your_reasoning tool is stripped in this case).
    if has_extended_thinking_active(resolved_model_name):
        instructions += EXTENDED_THINKING_PROMPT_NOTE

    prepared = prepare_prompt_for_model(
        model_name, instructions, "", prepend_system_to_user=False
    )
    instructions = prepared.instructions

    self.cur_model = model
    # Initial agent build; used below to discover already-registered tool
    # names so conflicting MCP tools can be filtered out. In the DBOS
    # branch a separate agent is constructed later and this one is dropped.
    p_agent = PydanticAgent(
        model=model,
        instructions=instructions,
        output_type=str,
        retries=3,
        toolsets=mcp_servers,
        history_processors=[self.message_history_accumulator],
        model_settings=model_settings,
    )

    agent_tools = self.get_available_tools()
    register_tools_for_agent(p_agent, agent_tools, model_name=resolved_model_name)

    # Get existing tool names to filter out conflicts with MCP tools
    existing_tool_names = set()
    try:
        # Get tools from the agent to find existing tool names
        # NOTE(review): relies on the private `_tools` attribute of
        # pydantic-ai's Agent — may break across library versions; confirm.
        tools = getattr(p_agent, "_tools", None)
        if tools:
            existing_tool_names = set(tools.keys())
    except Exception:
        # If we can't get tool names, proceed without filtering
        pass

    # Filter MCP server toolsets to remove conflicting tools
    filtered_mcp_servers = []
    if mcp_servers and existing_tool_names:
        for mcp_server in mcp_servers:
            try:
                # Get tools from this MCP server
                server_tools = getattr(mcp_server, "tools", None)
                if server_tools:
                    # Filter out conflicting tools
                    filtered_tools = {}
                    for tool_name, tool_func in server_tools.items():
                        if tool_name not in existing_tool_names:
                            filtered_tools[tool_name] = tool_func

                    # Create a filtered version of the MCP server if we have tools
                    if filtered_tools:
                        # Create a new toolset with filtered tools
                        from pydantic_ai.tools import ToolSet

                        filtered_toolset = ToolSet()
                        for tool_name, tool_func in filtered_tools.items():
                            # NOTE(review): writes into ToolSet's private
                            # `_tools` mapping — verify against the installed
                            # pydantic-ai version.
                            filtered_toolset._tools[tool_name] = tool_func
                        filtered_mcp_servers.append(filtered_toolset)
                    else:
                        # No tools left after filtering, skip this server
                        pass
                else:
                    # Can't get tools from this server, include as-is
                    filtered_mcp_servers.append(mcp_server)
            except Exception:
                # Error processing this server, include as-is to be safe
                filtered_mcp_servers.append(mcp_server)
    else:
        # No filtering needed or possible
        filtered_mcp_servers = mcp_servers if mcp_servers else []

    if len(filtered_mcp_servers) != len(mcp_servers):
        emit_info(
            Text.from_markup(
                f"[dim]Filtered {len(mcp_servers) - len(filtered_mcp_servers)} conflicting MCP tools[/dim]"
            )
        )

    self._last_model_name = resolved_model_name
    # expose for run_with_mcp
    # Wrap it with DBOS, but handle MCP servers separately to avoid serialization issues
    global _reload_count
    _reload_count += 1
    if get_use_dbos():
        # Don't pass MCP servers to the agent constructor when using DBOS
        # This prevents the "cannot pickle async_generator object" error
        # MCP servers will be handled separately in run_with_mcp
        agent_without_mcp = PydanticAgent(
            model=model,
            instructions=instructions,
            output_type=str,
            retries=3,
            toolsets=[],  # Don't include MCP servers here
            history_processors=[self.message_history_accumulator],
            model_settings=model_settings,
        )

        # Register regular tools (non-MCP) on the new agent
        agent_tools = self.get_available_tools()
        register_tools_for_agent(
            agent_without_mcp, agent_tools, model_name=resolved_model_name
        )

        # Wrap with DBOS - pass event_stream_handler at construction time
        # so DBOSModel gets the handler for streaming output
        dbos_agent = DBOSAgent(
            agent_without_mcp,
            name=f"{self.name}-{_reload_count}",
            event_stream_handler=event_stream_handler,
        )
        self.pydantic_agent = dbos_agent
        self._code_generation_agent = dbos_agent

        # Store filtered MCP servers separately for runtime use
        self._mcp_servers = filtered_mcp_servers
    else:
        # Normal path without DBOS - include filtered MCP servers in the agent
        # Re-create agent with filtered MCP servers
        p_agent = PydanticAgent(
            model=model,
            instructions=instructions,
            output_type=str,
            retries=3,
            toolsets=filtered_mcp_servers,
            history_processors=[self.message_history_accumulator],
            model_settings=model_settings,
        )
        # Register regular tools on the agent
        agent_tools = self.get_available_tools()
        register_tools_for_agent(
            p_agent, agent_tools, model_name=resolved_model_name
        )

        self.pydantic_agent = p_agent
        self._code_generation_agent = p_agent
        self._mcp_servers = filtered_mcp_servers
    return self._code_generation_agent
|
|
1485
|
+
|
|
1486
|
+
def _create_agent_with_output_type(self, output_type: Type[Any]) -> PydanticAgent:
    """Create a temporary agent configured with a custom output_type.

    This is used when structured output is requested via run_with_mcp.
    The agent is created fresh with the same configuration as the main agent
    but with the specified output_type instead of str.

    Args:
        output_type: The Pydantic model or type for structured output.

    Returns:
        A configured PydanticAgent (or DBOSAgent wrapper) with the custom output_type.
    """
    from code_puppy.model_utils import prepare_prompt_for_model
    from code_puppy.tools import (
        EXTENDED_THINKING_PROMPT_NOTE,
        has_extended_thinking_active,
        register_tools_for_agent,
    )

    model_name = self.get_model_name()
    models_config = ModelFactory.load_config()
    model, resolved_model_name = self._load_model_with_fallback(
        model_name, models_config, str(uuid.uuid4())
    )

    # Assemble instruction text the same way reload_code_generation_agent does.
    instructions = self.get_full_system_prompt()
    puppy_rules = self.load_puppy_rules()
    if puppy_rules:
        instructions += f"\n{puppy_rules}"

    # Reuse the (already filtered) MCP servers from the last full reload.
    mcp_servers = getattr(self, "_mcp_servers", []) or []
    model_settings = make_model_settings(resolved_model_name)

    prepared = prepare_prompt_for_model(
        model_name, instructions, "", prepend_system_to_user=False
    )
    instructions = prepared.instructions

    # When extended thinking is active, nudge the model to think between
    # tool calls (the share_your_reasoning tool is stripped in this case).
    # NOTE(review): here the note is appended AFTER prepare_prompt_for_model,
    # whereas reload_code_generation_agent appends it BEFORE the prepare
    # step — confirm which ordering is intended.
    if has_extended_thinking_active(resolved_model_name):
        instructions += EXTENDED_THINKING_PROMPT_NOTE

    global _reload_count
    _reload_count += 1

    if get_use_dbos():
        # DBOS path: no MCP toolsets at construction time (see
        # reload_code_generation_agent for the serialization rationale).
        temp_agent = PydanticAgent(
            model=model,
            instructions=instructions,
            output_type=output_type,
            retries=3,
            toolsets=[],
            history_processors=[self.message_history_accumulator],
            model_settings=model_settings,
        )
        agent_tools = self.get_available_tools()
        register_tools_for_agent(
            temp_agent, agent_tools, model_name=resolved_model_name
        )
        # Pass event_stream_handler at construction time for streaming output
        dbos_agent = DBOSAgent(
            temp_agent,
            name=f"{self.name}-structured-{_reload_count}",
            event_stream_handler=event_stream_handler,
        )
        return dbos_agent
    else:
        temp_agent = PydanticAgent(
            model=model,
            instructions=instructions,
            output_type=output_type,
            retries=3,
            toolsets=mcp_servers,
            history_processors=[self.message_history_accumulator],
            model_settings=model_settings,
        )
        agent_tools = self.get_available_tools()
        register_tools_for_agent(
            temp_agent, agent_tools, model_name=resolved_model_name
        )
        return temp_agent
|
|
1569
|
+
|
|
1570
|
+
# It's okay to decorate it with DBOS.step even if not using DBOS; the decorator is a no-op in that case.
@DBOS.step()
def message_history_accumulator(self, ctx: RunContext, messages: List[Any]):
    """Merge incoming run messages into the persistent history and trim it.

    Acts as a pydantic-ai history processor: deduplicates incoming messages
    by hash, runs the main history processor for token management, strips
    empty ThinkingParts, and guarantees the history ends with a ModelRequest.

    Args:
        ctx: The pydantic-ai run context.
        messages: Messages produced by the current run.

    Returns:
        The final (possibly trimmed) message history.
    """
    _message_history = self.get_message_history()

    # Hook: on_message_history_processor_start - dump the message history before processing
    on_message_history_processor_start(
        agent_name=self.name,
        session_id=getattr(self, "session_id", None),
        message_history=list(_message_history),  # Copy to avoid mutation issues
        incoming_messages=list(messages),
    )
    message_history_hashes = set([self.hash_message(m) for m in _message_history])
    messages_added = 0
    last_msg_index = len(messages) - 1
    for i, msg in enumerate(messages):
        msg_hash = self.hash_message(msg)
        if msg_hash not in message_history_hashes:
            # Always preserve the last message (the user's new prompt) even
            # if its hash matches a previously compacted/summarized message.
            # Short or repeated prompts (e.g. "yes", "1") can collide with
            # compacted hashes, which would silently drop the user's input
            # and leave the history ending with a ModelResponse. That
            # triggers an Anthropic API error: "This model does not support
            # assistant message prefill."
            if (
                i == last_msg_index
                or msg_hash not in self.get_compacted_message_hashes()
            ):
                _message_history.append(msg)
                messages_added += 1

    # Apply message history trimming using the main processor
    # This ensures we maintain global state while still managing context limits
    self.message_history_processor(ctx, _message_history)
    result_messages_filtered_empty_thinking = []
    filtered_count = 0
    for msg in self.get_message_history():
        # Filter out single-part messages that are empty ThinkingParts
        if len(msg.parts) == 1 and isinstance(msg.parts[0], ThinkingPart):
            if not msg.parts[0].content:
                filtered_count += 1
                continue
        # For multi-part messages, strip empty ThinkingParts but keep the message
        elif any(isinstance(p, ThinkingPart) and not p.content for p in msg.parts):
            msg = dataclasses.replace(
                msg,
                parts=[
                    p
                    for p in msg.parts
                    if not (isinstance(p, ThinkingPart) and not p.content)
                ],
            )
            # Stripping may leave the message with no parts at all; drop it.
            if not msg.parts:
                filtered_count += 1
                continue
        result_messages_filtered_empty_thinking.append(msg)
    self.set_message_history(result_messages_filtered_empty_thinking)

    # Safety net: ensure history always ends with a ModelRequest.
    # If compaction or filtering somehow leaves a trailing ModelResponse,
    # the Anthropic API will reject it with a prefill error.
    final_history = self.ensure_history_ends_with_request(
        self.get_message_history()
    )
    if final_history != self.get_message_history():
        self.set_message_history(final_history)

    # Hook: on_message_history_processor_end - dump the message history after processing
    messages_filtered = len(messages) - messages_added + filtered_count
    on_message_history_processor_end(
        agent_name=self.name,
        session_id=getattr(self, "session_id", None),
        message_history=list(final_history),  # Copy to avoid mutation issues
        messages_added=messages_added,
        messages_filtered=messages_filtered,
    )

    return final_history
|
|
1649
|
+
|
|
1650
|
+
def _spawn_ctrl_x_key_listener(
|
|
1651
|
+
self,
|
|
1652
|
+
stop_event: threading.Event,
|
|
1653
|
+
on_escape: Callable[[], None],
|
|
1654
|
+
on_cancel_agent: Optional[Callable[[], None]] = None,
|
|
1655
|
+
) -> Optional[threading.Thread]:
|
|
1656
|
+
"""Start a keyboard listener thread for CLI sessions.
|
|
1657
|
+
|
|
1658
|
+
Listens for Ctrl+X (shell command cancel) and optionally the configured
|
|
1659
|
+
cancel_agent_key (when not using SIGINT/Ctrl+C).
|
|
1660
|
+
|
|
1661
|
+
Args:
|
|
1662
|
+
stop_event: Event to signal the listener to stop.
|
|
1663
|
+
on_escape: Callback for Ctrl+X (shell command cancel).
|
|
1664
|
+
on_cancel_agent: Optional callback for cancel_agent_key (only used
|
|
1665
|
+
when cancel_agent_uses_signal() returns False).
|
|
1666
|
+
"""
|
|
1667
|
+
try:
|
|
1668
|
+
import sys
|
|
1669
|
+
except ImportError:
|
|
1670
|
+
return None
|
|
1671
|
+
|
|
1672
|
+
stdin = getattr(sys, "stdin", None)
|
|
1673
|
+
if stdin is None or not hasattr(stdin, "isatty"):
|
|
1674
|
+
return None
|
|
1675
|
+
try:
|
|
1676
|
+
if not stdin.isatty():
|
|
1677
|
+
return None
|
|
1678
|
+
except Exception:
|
|
1679
|
+
return None
|
|
1680
|
+
|
|
1681
|
+
def listener() -> None:
|
|
1682
|
+
try:
|
|
1683
|
+
if sys.platform.startswith("win"):
|
|
1684
|
+
self._listen_for_ctrl_x_windows(
|
|
1685
|
+
stop_event, on_escape, on_cancel_agent
|
|
1686
|
+
)
|
|
1687
|
+
else:
|
|
1688
|
+
self._listen_for_ctrl_x_posix(
|
|
1689
|
+
stop_event, on_escape, on_cancel_agent
|
|
1690
|
+
)
|
|
1691
|
+
except Exception:
|
|
1692
|
+
emit_warning(
|
|
1693
|
+
"Key listener stopped unexpectedly; press Ctrl+C to cancel."
|
|
1694
|
+
)
|
|
1695
|
+
|
|
1696
|
+
thread = threading.Thread(
|
|
1697
|
+
target=listener, name="code-puppy-key-listener", daemon=True
|
|
1698
|
+
)
|
|
1699
|
+
thread.start()
|
|
1700
|
+
return thread
|
|
1701
|
+
|
|
1702
|
+
def _listen_for_ctrl_x_windows(
    self,
    stop_event: threading.Event,
    on_escape: Callable[[], None],
    on_cancel_agent: Optional[Callable[[], None]] = None,
) -> None:
    """Windows key-polling loop: fire callbacks on Ctrl+X / cancel key.

    Polls msvcrt every 50ms until stop_event is set. Callback exceptions
    are caught and reported so a faulty handler cannot kill the listener;
    an error in the polling itself ends the loop entirely.

    Args:
        stop_event: Event that terminates the polling loop when set.
        on_escape: Callback invoked on Ctrl+X.
        on_cancel_agent: Optional callback for the configured cancel key
            (only consulted when cancel_agent_uses_signal() is False).
    """
    import msvcrt
    import time

    # Get the cancel agent char code if we're using keyboard-based cancel
    cancel_agent_char: Optional[str] = None
    if on_cancel_agent is not None and not cancel_agent_uses_signal():
        cancel_agent_char = get_cancel_agent_char_code()

    while not stop_event.is_set():
        try:
            if msvcrt.kbhit():
                key = msvcrt.getwch()
                if key == "\x18":  # Ctrl+X
                    try:
                        on_escape()
                    except Exception:
                        emit_warning(
                            "Ctrl+X handler raised unexpectedly; Ctrl+C still works."
                        )
                elif (
                    cancel_agent_char
                    and on_cancel_agent
                    and key == cancel_agent_char
                ):
                    try:
                        on_cancel_agent()
                    except Exception:
                        emit_warning("Cancel agent handler raised unexpectedly.")
        except Exception:
            # A failure in the console polling itself is unrecoverable here;
            # warn and exit the listener (Ctrl+C remains available).
            emit_warning(
                "Windows key listener error; Ctrl+C is still available for cancel."
            )
            return
        # Sleep between polls to avoid a busy loop.
        time.sleep(0.05)
|
|
1742
|
+
|
|
1743
|
+
def _listen_for_ctrl_x_posix(
    self,
    stop_event: threading.Event,
    on_escape: Callable[[], None],
    on_cancel_agent: Optional[Callable[[], None]] = None,
) -> None:
    """POSIX key-polling loop: fire callbacks on Ctrl+X / cancel key.

    Puts the terminal into cbreak mode (single-character reads, no echo
    changes beyond that) and polls stdin with select() every 50ms until
    stop_event is set or stdin closes. The original terminal attributes
    are always restored on exit via the finally block.

    Args:
        stop_event: Event that terminates the polling loop when set.
        on_escape: Callback invoked on Ctrl+X.
        on_cancel_agent: Optional callback for the configured cancel key
            (only consulted when cancel_agent_uses_signal() is False).
    """
    import select
    import sys
    import termios
    import tty

    # Get the cancel agent char code if we're using keyboard-based cancel
    cancel_agent_char: Optional[str] = None
    if on_cancel_agent is not None and not cancel_agent_uses_signal():
        cancel_agent_char = get_cancel_agent_char_code()

    stdin = sys.stdin
    try:
        fd = stdin.fileno()
    except (AttributeError, ValueError, OSError):
        # stdin has no usable file descriptor (e.g. replaced by a harness).
        return
    try:
        original_attrs = termios.tcgetattr(fd)
    except Exception:
        # Not a real terminal; nothing to listen on.
        return

    try:
        tty.setcbreak(fd)
        while not stop_event.is_set():
            try:
                # 50ms timeout so stop_event is re-checked regularly.
                read_ready, _, _ = select.select([stdin], [], [], 0.05)
            except Exception:
                break
            if not read_ready:
                continue
            data = stdin.read(1)
            if not data:
                # EOF on stdin: stop listening.
                break
            if data == "\x18":  # Ctrl+X
                try:
                    on_escape()
                except Exception:
                    emit_warning(
                        "Ctrl+X handler raised unexpectedly; Ctrl+C still works."
                    )
            elif (
                cancel_agent_char and on_cancel_agent and data == cancel_agent_char
            ):
                try:
                    on_cancel_agent()
                except Exception:
                    emit_warning("Cancel agent handler raised unexpectedly.")
    finally:
        # Always restore the terminal to its original mode.
        termios.tcsetattr(fd, termios.TCSADRAIN, original_attrs)
|
|
1797
|
+
|
|
1798
|
+
async def run_with_mcp(
|
|
1799
|
+
self,
|
|
1800
|
+
prompt: str,
|
|
1801
|
+
*,
|
|
1802
|
+
attachments: Optional[Sequence[BinaryContent]] = None,
|
|
1803
|
+
link_attachments: Optional[Sequence[Union[ImageUrl, DocumentUrl]]] = None,
|
|
1804
|
+
output_type: Optional[Type[Any]] = None,
|
|
1805
|
+
**kwargs,
|
|
1806
|
+
) -> Any:
|
|
1807
|
+
"""Run the agent with MCP servers, attachments, and full cancellation support.
|
|
1808
|
+
|
|
1809
|
+
Args:
|
|
1810
|
+
prompt: Primary user prompt text (may be empty when attachments present).
|
|
1811
|
+
attachments: Local binary payloads (e.g., dragged images) to include.
|
|
1812
|
+
link_attachments: Remote assets (image/document URLs) to include.
|
|
1813
|
+
output_type: Optional Pydantic model or type for structured output.
|
|
1814
|
+
When provided, creates a temporary agent configured to return
|
|
1815
|
+
this type instead of the default string output.
|
|
1816
|
+
**kwargs: Additional arguments forwarded to `pydantic_ai.Agent.run`.
|
|
1817
|
+
|
|
1818
|
+
Returns:
|
|
1819
|
+
The agent's response (typed according to output_type if specified).
|
|
1820
|
+
|
|
1821
|
+
Raises:
|
|
1822
|
+
asyncio.CancelledError: When execution is cancelled by user.
|
|
1823
|
+
"""
|
|
1824
|
+
# Sanitize prompt to remove invalid Unicode surrogates that can cause
|
|
1825
|
+
# encoding errors (especially common on Windows with copy-paste)
|
|
1826
|
+
if prompt:
|
|
1827
|
+
try:
|
|
1828
|
+
prompt = prompt.encode("utf-8", errors="surrogatepass").decode(
|
|
1829
|
+
"utf-8", errors="replace"
|
|
1830
|
+
)
|
|
1831
|
+
except (UnicodeEncodeError, UnicodeDecodeError):
|
|
1832
|
+
# Fallback: filter out surrogate characters directly
|
|
1833
|
+
prompt = "".join(
|
|
1834
|
+
char if ord(char) < 0xD800 or ord(char) > 0xDFFF else "\ufffd"
|
|
1835
|
+
for char in prompt
|
|
1836
|
+
)
|
|
1837
|
+
|
|
1838
|
+
group_id = str(uuid.uuid4())
|
|
1839
|
+
# Avoid double-loading: reuse existing agent if already built
|
|
1840
|
+
pydantic_agent = (
|
|
1841
|
+
self._code_generation_agent or self.reload_code_generation_agent()
|
|
1842
|
+
)
|
|
1843
|
+
|
|
1844
|
+
# If a custom output_type is specified, create a temporary agent with that type
|
|
1845
|
+
if output_type is not None:
|
|
1846
|
+
pydantic_agent = self._create_agent_with_output_type(output_type)
|
|
1847
|
+
|
|
1848
|
+
# Handle model-specific prompt transformations via prepare_prompt_for_model()
|
|
1849
|
+
# This uses the get_model_system_prompt hook, so plugins can register their own handlers
|
|
1850
|
+
from code_puppy.model_utils import prepare_prompt_for_model
|
|
1851
|
+
|
|
1852
|
+
# Only prepend system prompt on first message (empty history)
|
|
1853
|
+
should_prepend = len(self.get_message_history()) == 0
|
|
1854
|
+
if should_prepend:
|
|
1855
|
+
system_prompt = self.get_full_system_prompt()
|
|
1856
|
+
puppy_rules = self.load_puppy_rules()
|
|
1857
|
+
if puppy_rules:
|
|
1858
|
+
system_prompt += f"\n{puppy_rules}"
|
|
1859
|
+
|
|
1860
|
+
prepared = prepare_prompt_for_model(
|
|
1861
|
+
model_name=self.get_model_name(),
|
|
1862
|
+
system_prompt=system_prompt,
|
|
1863
|
+
user_prompt=prompt,
|
|
1864
|
+
prepend_system_to_user=True,
|
|
1865
|
+
)
|
|
1866
|
+
prompt = prepared.user_prompt
|
|
1867
|
+
|
|
1868
|
+
# Build combined prompt payload when attachments are provided.
|
|
1869
|
+
attachment_parts: List[Any] = []
|
|
1870
|
+
if attachments:
|
|
1871
|
+
attachment_parts.extend(list(attachments))
|
|
1872
|
+
if link_attachments:
|
|
1873
|
+
attachment_parts.extend(list(link_attachments))
|
|
1874
|
+
|
|
1875
|
+
if attachment_parts:
|
|
1876
|
+
prompt_payload: Union[str, List[Any]] = []
|
|
1877
|
+
if prompt:
|
|
1878
|
+
prompt_payload.append(prompt)
|
|
1879
|
+
prompt_payload.extend(attachment_parts)
|
|
1880
|
+
else:
|
|
1881
|
+
prompt_payload = prompt
|
|
1882
|
+
|
|
1883
|
+
async def run_agent_task():
    """Execute one pydantic-ai agent run with error handling localized here.

    Runs as its own asyncio task so the surrounding coroutine can await or
    cancel it. Returns the pydantic-ai run result, or None when one of the
    ``except*`` handlers below handled (swallowed) the failure.
    """
    try:
        # Drop any tool-call messages left dangling by a previous
        # interrupt before handing history to the model.
        self.set_message_history(
            self.prune_interrupted_tool_calls(self.get_message_history())
        )

        # DELAYED COMPACTION: Check if we should attempt delayed compaction
        if self.should_attempt_delayed_compaction():
            emit_info(
                "🔄 Attempting delayed compaction (tool calls completed)",
                message_group="token_context_status",
            )
            current_messages = self.get_message_history()
            compacted_messages, _ = self.compact_messages(current_messages)
            # Only touch stored history if compaction actually changed it.
            if compacted_messages != current_messages:
                self.set_message_history(compacted_messages)
                emit_info(
                    "✅ Delayed compaction completed successfully",
                    message_group="token_context_status",
                )

        usage_limits = UsageLimits(request_limit=get_message_limit())

        # Handle MCP servers - add them temporarily when using DBOS
        if (
            get_use_dbos()
            and hasattr(self, "_mcp_servers")
            and self._mcp_servers
        ):
            # Temporarily add MCP servers to the DBOS agent using internal _toolsets
            # NOTE(review): reaches into pydantic-ai's private ``_toolsets``
            # attribute — fragile across library upgrades; confirm no public API.
            original_toolsets = pydantic_agent._toolsets
            pydantic_agent._toolsets = original_toolsets + self._mcp_servers

            try:
                # Set the workflow ID for DBOS context so DBOS and Code Puppy ID match
                with SetWorkflowID(group_id):
                    result_ = await pydantic_agent.run(
                        prompt_payload,
                        message_history=self.get_message_history(),
                        usage_limits=usage_limits,
                        event_stream_handler=event_stream_handler,
                        **kwargs,
                    )
                    return result_
            finally:
                # Always restore original toolsets
                pydantic_agent._toolsets = original_toolsets
        elif get_use_dbos():
            # DBOS without MCP servers: just pin the workflow id.
            with SetWorkflowID(group_id):
                result_ = await pydantic_agent.run(
                    prompt_payload,
                    message_history=self.get_message_history(),
                    usage_limits=usage_limits,
                    event_stream_handler=event_stream_handler,
                    **kwargs,
                )
                return result_
        else:
            # Non-DBOS path (MCP servers are already included)
            result_ = await pydantic_agent.run(
                prompt_payload,
                message_history=self.get_message_history(),
                usage_limits=usage_limits,
                event_stream_handler=event_stream_handler,
                **kwargs,
            )
            return result_
    # ``except*`` (PEP 654, Python 3.11+): each handler matches members of
    # an ExceptionGroup raised from the run above.
    except* UsageLimitExceeded as ule:
        emit_info(f"Usage limit exceeded: {str(ule)}", group_id=group_id)
        emit_info(
            "The agent has reached its usage limit. You can ask it to continue by saying 'please continue' or similar.",
            group_id=group_id,
        )
    except* mcp.shared.exceptions.McpError as mcp_error:
        emit_info(f"MCP server error: {str(mcp_error)}", group_id=group_id)
        emit_info(f"{str(mcp_error)}", group_id=group_id)
        emit_info(
            "Try disabling any malfunctioning MCP servers", group_id=group_id
        )
    except* asyncio.exceptions.CancelledError:
        emit_info("Cancelled")
        if get_use_dbos():
            # Cancel the matching DBOS workflow so its state doesn't linger.
            await DBOS.cancel_workflow_async(group_id)
    except* InterruptedError as ie:
        emit_info(f"Interrupted: {str(ie)}")
        if get_use_dbos():
            await DBOS.cancel_workflow_async(group_id)
    except* Exception as other_error:
        # Filter out CancelledError and UsageLimitExceeded from the exception group - let it propagate
        remaining_exceptions = []

        def collect_non_cancelled_exceptions(exc):
            # Recursively walk nested ExceptionGroups, reporting and
            # collecting anything that isn't a cancel or usage-limit error.
            if isinstance(exc, ExceptionGroup):
                for sub_exc in exc.exceptions:
                    collect_non_cancelled_exceptions(sub_exc)
            elif not isinstance(
                exc, (asyncio.CancelledError, UsageLimitExceeded)
            ):
                remaining_exceptions.append(exc)
                emit_info(f"Unexpected error: {str(exc)}", group_id=group_id)
                emit_info(f"{str(exc.args)}", group_id=group_id)
                # Log to file for debugging
                log_error(
                    exc,
                    context=f"Agent run (group_id={group_id})",
                    include_traceback=True,
                )

        collect_non_cancelled_exceptions(other_error)

        # If there are CancelledError exceptions in the group, re-raise them
        # NOTE(review): despite the comment above, neither
        # cancelled_exceptions nor remaining_exceptions is ever re-raised —
        # every error in this handler is logged and swallowed. Confirm this
        # is intentional (callers receive None instead of an exception).
        cancelled_exceptions = []

        def collect_cancelled_exceptions(exc):
            # Recursively gather CancelledError members of the group.
            if isinstance(exc, ExceptionGroup):
                for sub_exc in exc.exceptions:
                    collect_cancelled_exceptions(sub_exc)
            elif isinstance(exc, asyncio.CancelledError):
                cancelled_exceptions.append(exc)

        collect_cancelled_exceptions(other_error)
    finally:
        # Re-prune on every exit path: the run may have been interrupted
        # mid tool call, leaving unmatched tool messages in history.
        self.set_message_history(
            self.prune_interrupted_tool_calls(self.get_message_history())
        )
|
|
2008
|
+
|
|
2009
|
+
# Create the task FIRST
# The agent runs as a separate asyncio task so the cancellation handlers
# installed below can target it without tearing down this coroutine.
agent_task = asyncio.create_task(run_agent_task())

# Fire agent_run_start hook - plugins can use this to start background tasks
# (e.g., token refresh heartbeats for OAuth models)
try:
    await on_agent_run_start(
        agent_name=self.name,
        model_name=self.get_model_name(),
        session_id=group_id,
    )
except Exception:
    pass  # Don't fail agent run if hook fails

# Captured by schedule_agent_cancel() below so cancellation can be
# scheduled thread-safely from a signal handler or listener thread.
loop = asyncio.get_running_loop()
|
|
2024
|
+
|
|
2025
|
+
def schedule_agent_cancel() -> None:
    """Request cancellation of the agent task and any live subagent tasks.

    Safe to call from a signal handler or the keyboard-listener thread:
    all cancellations are marshalled onto the event loop via
    ``call_soon_threadsafe``.
    """
    from code_puppy.tools.command_runner import _RUNNING_PROCESSES

    # A shell command currently owns the foreground; cancelling the agent
    # underneath it would orphan the process, so refuse and tell the user.
    if len(_RUNNING_PROCESSES) > 0:
        emit_warning(
            "Refusing to cancel Agent while a shell command is currently running - press Ctrl+X to cancel the shell command."
        )
        return

    # Nothing to cancel once the agent task has already completed.
    if agent_task.done():
        return

    # Tear down live subagent tasks before cancelling the parent.
    if _active_subagent_tasks:
        emit_warning(
            f"Cancelling {len(_active_subagent_tasks)} active subagent task(s)..."
        )
        # Snapshot the set first: cancellation callbacks may mutate it
        # while we iterate.
        pending = [t for t in _active_subagent_tasks if not t.done()]
        for sub_task in pending:
            loop.call_soon_threadsafe(sub_task.cancel)

    loop.call_soon_threadsafe(agent_task.cancel)
|
|
2047
|
+
|
|
2048
|
+
def keyboard_interrupt_handler(_sig, _frame):
    """SIGINT handler: cancel the agent unless a user-input prompt is active.

    While a blocking ``input()`` prompt is up (e.g. a file-permission ask),
    we do nothing so the interrupt surfaces as a KeyboardInterrupt inside
    that ``input()`` call instead of cancelling the agent.
    """
    if not is_awaiting_user_input():
        schedule_agent_cancel()
|
|
2056
|
+
|
|
2057
|
+
def graceful_sigint_handler(_sig, _frame):
    """SIGINT handler for keyboard-driven cancel mode: hint, don't cancel.

    Ctrl+C is a no-op here except for printing which key actually cancels
    the agent. The terminal is reset first because a stray SIGINT can
    leave the console in a broken state on Windows when run via uvx.
    """
    from code_puppy.keymap import get_cancel_agent_display_name
    from code_puppy.terminal_utils import reset_windows_terminal_full

    # Reset terminal state before emitting anything, to avoid bricking.
    reset_windows_terminal_full()

    emit_info(
        f"Use {get_cancel_agent_display_name()} to cancel the agent task."
    )
|
|
2069
|
+
|
|
2070
|
+
# Sentinels for the finally block below: only restore/stop what was
# actually installed/started on this path.
original_handler = None
key_listener_stop_event = None
_key_listener_thread = None

try:
    if cancel_agent_uses_signal():
        # Use SIGINT-based cancellation (default Ctrl+C behavior)
        original_handler = signal.signal(
            signal.SIGINT, keyboard_interrupt_handler
        )
    else:
        # Use keyboard listener for agent cancellation
        # Set a graceful SIGINT handler that shows a hint
        original_handler = signal.signal(signal.SIGINT, graceful_sigint_handler)
        # Spawn keyboard listener with the cancel agent callback
        key_listener_stop_event = threading.Event()
        _key_listener_thread = self._spawn_ctrl_x_key_listener(
            key_listener_stop_event,
            on_escape=lambda: None,  # Ctrl+X handled by command_runner
            on_cancel_agent=schedule_agent_cancel,
        )

    # Wait for the task to complete or be cancelled
    result = await agent_task

    # Update MCP tool cache after successful run for accurate token estimation
    if hasattr(self, "_mcp_servers") and self._mcp_servers:
        try:
            await self._update_mcp_tool_cache()
        except Exception:
            pass  # Don't fail the run if cache update fails

    # Extract response text for the callback
    # The result object may expose `.data` or `.output` (checked in that
    # order); anything else falls back to str(result).
    _run_response_text = ""
    if result is not None:
        if hasattr(result, "data"):
            _run_response_text = str(result.data) if result.data else ""
        elif hasattr(result, "output"):
            _run_response_text = str(result.output) if result.output else ""
        else:
            _run_response_text = str(result)

    _run_success = True
    _run_error = None
    return result
except asyncio.CancelledError:
    # NOTE(review): cancellation is swallowed here (not re-raised), so the
    # caller sees a normal return after a cancel — presumably intentional;
    # confirm no caller relies on CancelledError propagating.
    _run_success = False
    _run_error = None  # Cancellation is not an error
    _run_response_text = ""
    agent_task.cancel()
except KeyboardInterrupt:
    _run_success = False
    _run_error = None  # User interrupt is not an error
    _run_response_text = ""
    if not agent_task.done():
        agent_task.cancel()
except Exception as e:
    # Real failures: record for the agent_run_end hook, then propagate.
    _run_success = False
    _run_error = e
    _run_response_text = ""
    raise
finally:
    # Fire agent_run_end hook - plugins can use this for:
    # - Stopping background tasks (token refresh heartbeats)
    # - Workflow orchestration (Ralph's autonomous loop)
    # - Logging/analytics
    try:
        await on_agent_run_end(
            agent_name=self.name,
            model_name=self.get_model_name(),
            session_id=group_id,
            success=_run_success,
            error=_run_error,
            response_text=_run_response_text,
            metadata={"model": self.get_model_name()},
        )
    except Exception:
        pass  # Don't fail cleanup if hook fails

    # Stop keyboard listener if it was started
    if key_listener_stop_event is not None:
        key_listener_stop_event.set()
    # Restore original signal handler
    if (
        original_handler is not None
    ):  # Explicit None check - SIG_DFL can be 0/falsy!
        signal.signal(signal.SIGINT, original_handler)
|