code-puppy 0.0.169__py3-none-any.whl → 0.0.366__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/__init__.py +7 -1
- code_puppy/agents/__init__.py +8 -8
- code_puppy/agents/agent_c_reviewer.py +155 -0
- code_puppy/agents/agent_code_puppy.py +9 -2
- code_puppy/agents/agent_code_reviewer.py +90 -0
- code_puppy/agents/agent_cpp_reviewer.py +132 -0
- code_puppy/agents/agent_creator_agent.py +48 -9
- code_puppy/agents/agent_golang_reviewer.py +151 -0
- code_puppy/agents/agent_javascript_reviewer.py +160 -0
- code_puppy/agents/agent_manager.py +146 -199
- code_puppy/agents/agent_pack_leader.py +383 -0
- code_puppy/agents/agent_planning.py +163 -0
- code_puppy/agents/agent_python_programmer.py +165 -0
- code_puppy/agents/agent_python_reviewer.py +90 -0
- code_puppy/agents/agent_qa_expert.py +163 -0
- code_puppy/agents/agent_qa_kitten.py +208 -0
- code_puppy/agents/agent_security_auditor.py +181 -0
- code_puppy/agents/agent_terminal_qa.py +323 -0
- code_puppy/agents/agent_typescript_reviewer.py +166 -0
- code_puppy/agents/base_agent.py +1713 -1
- code_puppy/agents/event_stream_handler.py +350 -0
- code_puppy/agents/json_agent.py +12 -1
- code_puppy/agents/pack/__init__.py +34 -0
- code_puppy/agents/pack/bloodhound.py +304 -0
- code_puppy/agents/pack/husky.py +321 -0
- code_puppy/agents/pack/retriever.py +393 -0
- code_puppy/agents/pack/shepherd.py +348 -0
- code_puppy/agents/pack/terrier.py +287 -0
- code_puppy/agents/pack/watchdog.py +367 -0
- code_puppy/agents/prompt_reviewer.py +145 -0
- code_puppy/agents/subagent_stream_handler.py +276 -0
- code_puppy/api/__init__.py +13 -0
- code_puppy/api/app.py +169 -0
- code_puppy/api/main.py +21 -0
- code_puppy/api/pty_manager.py +446 -0
- code_puppy/api/routers/__init__.py +12 -0
- code_puppy/api/routers/agents.py +36 -0
- code_puppy/api/routers/commands.py +217 -0
- code_puppy/api/routers/config.py +74 -0
- code_puppy/api/routers/sessions.py +232 -0
- code_puppy/api/templates/terminal.html +361 -0
- code_puppy/api/websocket.py +154 -0
- code_puppy/callbacks.py +174 -4
- code_puppy/chatgpt_codex_client.py +283 -0
- code_puppy/claude_cache_client.py +586 -0
- code_puppy/cli_runner.py +916 -0
- code_puppy/command_line/add_model_menu.py +1079 -0
- code_puppy/command_line/agent_menu.py +395 -0
- code_puppy/command_line/attachments.py +395 -0
- code_puppy/command_line/autosave_menu.py +605 -0
- code_puppy/command_line/clipboard.py +527 -0
- code_puppy/command_line/colors_menu.py +520 -0
- code_puppy/command_line/command_handler.py +233 -627
- code_puppy/command_line/command_registry.py +150 -0
- code_puppy/command_line/config_commands.py +715 -0
- code_puppy/command_line/core_commands.py +792 -0
- code_puppy/command_line/diff_menu.py +863 -0
- code_puppy/command_line/load_context_completion.py +15 -22
- code_puppy/command_line/mcp/base.py +1 -4
- code_puppy/command_line/mcp/catalog_server_installer.py +175 -0
- code_puppy/command_line/mcp/custom_server_form.py +688 -0
- code_puppy/command_line/mcp/custom_server_installer.py +195 -0
- code_puppy/command_line/mcp/edit_command.py +148 -0
- code_puppy/command_line/mcp/handler.py +9 -4
- code_puppy/command_line/mcp/help_command.py +6 -5
- code_puppy/command_line/mcp/install_command.py +16 -27
- code_puppy/command_line/mcp/install_menu.py +685 -0
- code_puppy/command_line/mcp/list_command.py +3 -3
- code_puppy/command_line/mcp/logs_command.py +174 -65
- code_puppy/command_line/mcp/remove_command.py +2 -2
- code_puppy/command_line/mcp/restart_command.py +12 -4
- code_puppy/command_line/mcp/search_command.py +17 -11
- code_puppy/command_line/mcp/start_all_command.py +22 -13
- code_puppy/command_line/mcp/start_command.py +50 -31
- code_puppy/command_line/mcp/status_command.py +6 -7
- code_puppy/command_line/mcp/stop_all_command.py +11 -8
- code_puppy/command_line/mcp/stop_command.py +11 -10
- code_puppy/command_line/mcp/test_command.py +2 -2
- code_puppy/command_line/mcp/utils.py +1 -1
- code_puppy/command_line/mcp/wizard_utils.py +22 -18
- code_puppy/command_line/mcp_completion.py +174 -0
- code_puppy/command_line/model_picker_completion.py +89 -30
- code_puppy/command_line/model_settings_menu.py +884 -0
- code_puppy/command_line/motd.py +14 -8
- code_puppy/command_line/onboarding_slides.py +179 -0
- code_puppy/command_line/onboarding_wizard.py +340 -0
- code_puppy/command_line/pin_command_completion.py +329 -0
- code_puppy/command_line/prompt_toolkit_completion.py +626 -75
- code_puppy/command_line/session_commands.py +296 -0
- code_puppy/command_line/utils.py +54 -0
- code_puppy/config.py +1181 -51
- code_puppy/error_logging.py +118 -0
- code_puppy/gemini_code_assist.py +385 -0
- code_puppy/gemini_model.py +602 -0
- code_puppy/http_utils.py +220 -104
- code_puppy/keymap.py +128 -0
- code_puppy/main.py +5 -594
- code_puppy/{mcp → mcp_}/__init__.py +17 -0
- code_puppy/{mcp → mcp_}/async_lifecycle.py +35 -4
- code_puppy/{mcp → mcp_}/blocking_startup.py +70 -43
- code_puppy/{mcp → mcp_}/captured_stdio_server.py +2 -2
- code_puppy/{mcp → mcp_}/config_wizard.py +5 -5
- code_puppy/{mcp → mcp_}/dashboard.py +15 -6
- code_puppy/{mcp → mcp_}/examples/retry_example.py +4 -1
- code_puppy/{mcp → mcp_}/managed_server.py +66 -39
- code_puppy/{mcp → mcp_}/manager.py +146 -52
- code_puppy/mcp_/mcp_logs.py +224 -0
- code_puppy/{mcp → mcp_}/registry.py +6 -6
- code_puppy/{mcp → mcp_}/server_registry_catalog.py +25 -8
- code_puppy/messaging/__init__.py +199 -2
- code_puppy/messaging/bus.py +610 -0
- code_puppy/messaging/commands.py +167 -0
- code_puppy/messaging/markdown_patches.py +57 -0
- code_puppy/messaging/message_queue.py +17 -48
- code_puppy/messaging/messages.py +500 -0
- code_puppy/messaging/queue_console.py +1 -24
- code_puppy/messaging/renderers.py +43 -146
- code_puppy/messaging/rich_renderer.py +1027 -0
- code_puppy/messaging/spinner/__init__.py +33 -5
- code_puppy/messaging/spinner/console_spinner.py +92 -52
- code_puppy/messaging/spinner/spinner_base.py +29 -0
- code_puppy/messaging/subagent_console.py +461 -0
- code_puppy/model_factory.py +686 -80
- code_puppy/model_utils.py +167 -0
- code_puppy/models.json +86 -104
- code_puppy/models_dev_api.json +1 -0
- code_puppy/models_dev_parser.py +592 -0
- code_puppy/plugins/__init__.py +164 -10
- code_puppy/plugins/antigravity_oauth/__init__.py +10 -0
- code_puppy/plugins/antigravity_oauth/accounts.py +406 -0
- code_puppy/plugins/antigravity_oauth/antigravity_model.py +704 -0
- code_puppy/plugins/antigravity_oauth/config.py +42 -0
- code_puppy/plugins/antigravity_oauth/constants.py +136 -0
- code_puppy/plugins/antigravity_oauth/oauth.py +478 -0
- code_puppy/plugins/antigravity_oauth/register_callbacks.py +406 -0
- code_puppy/plugins/antigravity_oauth/storage.py +271 -0
- code_puppy/plugins/antigravity_oauth/test_plugin.py +319 -0
- code_puppy/plugins/antigravity_oauth/token.py +167 -0
- code_puppy/plugins/antigravity_oauth/transport.py +767 -0
- code_puppy/plugins/antigravity_oauth/utils.py +169 -0
- code_puppy/plugins/chatgpt_oauth/__init__.py +8 -0
- code_puppy/plugins/chatgpt_oauth/config.py +52 -0
- code_puppy/plugins/chatgpt_oauth/oauth_flow.py +328 -0
- code_puppy/plugins/chatgpt_oauth/register_callbacks.py +94 -0
- code_puppy/plugins/chatgpt_oauth/test_plugin.py +293 -0
- code_puppy/plugins/chatgpt_oauth/utils.py +489 -0
- code_puppy/plugins/claude_code_oauth/README.md +167 -0
- code_puppy/plugins/claude_code_oauth/SETUP.md +93 -0
- code_puppy/plugins/claude_code_oauth/__init__.py +6 -0
- code_puppy/plugins/claude_code_oauth/config.py +50 -0
- code_puppy/plugins/claude_code_oauth/register_callbacks.py +308 -0
- code_puppy/plugins/claude_code_oauth/test_plugin.py +283 -0
- code_puppy/plugins/claude_code_oauth/utils.py +518 -0
- code_puppy/plugins/customizable_commands/__init__.py +0 -0
- code_puppy/plugins/customizable_commands/register_callbacks.py +169 -0
- code_puppy/plugins/example_custom_command/README.md +280 -0
- code_puppy/plugins/example_custom_command/register_callbacks.py +51 -0
- code_puppy/plugins/file_permission_handler/__init__.py +4 -0
- code_puppy/plugins/file_permission_handler/register_callbacks.py +523 -0
- code_puppy/plugins/frontend_emitter/__init__.py +25 -0
- code_puppy/plugins/frontend_emitter/emitter.py +121 -0
- code_puppy/plugins/frontend_emitter/register_callbacks.py +261 -0
- code_puppy/plugins/oauth_puppy_html.py +228 -0
- code_puppy/plugins/shell_safety/__init__.py +6 -0
- code_puppy/plugins/shell_safety/agent_shell_safety.py +69 -0
- code_puppy/plugins/shell_safety/command_cache.py +156 -0
- code_puppy/plugins/shell_safety/register_callbacks.py +202 -0
- code_puppy/prompts/antigravity_system_prompt.md +1 -0
- code_puppy/prompts/codex_system_prompt.md +310 -0
- code_puppy/pydantic_patches.py +131 -0
- code_puppy/reopenable_async_client.py +8 -8
- code_puppy/round_robin_model.py +10 -15
- code_puppy/session_storage.py +294 -0
- code_puppy/status_display.py +21 -4
- code_puppy/summarization_agent.py +52 -14
- code_puppy/terminal_utils.py +418 -0
- code_puppy/tools/__init__.py +139 -6
- code_puppy/tools/agent_tools.py +548 -49
- code_puppy/tools/browser/__init__.py +37 -0
- code_puppy/tools/browser/browser_control.py +289 -0
- code_puppy/tools/browser/browser_interactions.py +545 -0
- code_puppy/tools/browser/browser_locators.py +640 -0
- code_puppy/tools/browser/browser_manager.py +316 -0
- code_puppy/tools/browser/browser_navigation.py +251 -0
- code_puppy/tools/browser/browser_screenshot.py +179 -0
- code_puppy/tools/browser/browser_scripts.py +462 -0
- code_puppy/tools/browser/browser_workflows.py +221 -0
- code_puppy/tools/browser/chromium_terminal_manager.py +259 -0
- code_puppy/tools/browser/terminal_command_tools.py +521 -0
- code_puppy/tools/browser/terminal_screenshot_tools.py +556 -0
- code_puppy/tools/browser/terminal_tools.py +525 -0
- code_puppy/tools/command_runner.py +941 -153
- code_puppy/tools/common.py +1146 -6
- code_puppy/tools/display.py +84 -0
- code_puppy/tools/file_modifications.py +288 -89
- code_puppy/tools/file_operations.py +352 -266
- code_puppy/tools/subagent_context.py +158 -0
- code_puppy/uvx_detection.py +242 -0
- code_puppy/version_checker.py +30 -11
- code_puppy-0.0.366.data/data/code_puppy/models.json +110 -0
- code_puppy-0.0.366.data/data/code_puppy/models_dev_api.json +1 -0
- {code_puppy-0.0.169.dist-info → code_puppy-0.0.366.dist-info}/METADATA +184 -67
- code_puppy-0.0.366.dist-info/RECORD +217 -0
- {code_puppy-0.0.169.dist-info → code_puppy-0.0.366.dist-info}/WHEEL +1 -1
- {code_puppy-0.0.169.dist-info → code_puppy-0.0.366.dist-info}/entry_points.txt +1 -0
- code_puppy/agent.py +0 -231
- code_puppy/agents/agent_orchestrator.json +0 -26
- code_puppy/agents/runtime_manager.py +0 -272
- code_puppy/command_line/mcp/add_command.py +0 -183
- code_puppy/command_line/meta_command_handler.py +0 -153
- code_puppy/message_history_processor.py +0 -490
- code_puppy/messaging/spinner/textual_spinner.py +0 -101
- code_puppy/state_management.py +0 -200
- code_puppy/tui/__init__.py +0 -10
- code_puppy/tui/app.py +0 -986
- code_puppy/tui/components/__init__.py +0 -21
- code_puppy/tui/components/chat_view.py +0 -550
- code_puppy/tui/components/command_history_modal.py +0 -218
- code_puppy/tui/components/copy_button.py +0 -139
- code_puppy/tui/components/custom_widgets.py +0 -63
- code_puppy/tui/components/human_input_modal.py +0 -175
- code_puppy/tui/components/input_area.py +0 -167
- code_puppy/tui/components/sidebar.py +0 -309
- code_puppy/tui/components/status_bar.py +0 -182
- code_puppy/tui/messages.py +0 -27
- code_puppy/tui/models/__init__.py +0 -8
- code_puppy/tui/models/chat_message.py +0 -25
- code_puppy/tui/models/command_history.py +0 -89
- code_puppy/tui/models/enums.py +0 -24
- code_puppy/tui/screens/__init__.py +0 -15
- code_puppy/tui/screens/help.py +0 -130
- code_puppy/tui/screens/mcp_install_wizard.py +0 -803
- code_puppy/tui/screens/settings.py +0 -290
- code_puppy/tui/screens/tools.py +0 -74
- code_puppy-0.0.169.data/data/code_puppy/models.json +0 -128
- code_puppy-0.0.169.dist-info/RECORD +0 -112
- /code_puppy/{mcp → mcp_}/circuit_breaker.py +0 -0
- /code_puppy/{mcp → mcp_}/error_isolation.py +0 -0
- /code_puppy/{mcp → mcp_}/health_monitor.py +0 -0
- /code_puppy/{mcp → mcp_}/retry_manager.py +0 -0
- /code_puppy/{mcp → mcp_}/status_tracker.py +0 -0
- /code_puppy/{mcp → mcp_}/system_tools.py +0 -0
- {code_puppy-0.0.169.dist-info → code_puppy-0.0.366.dist-info}/licenses/LICENSE +0 -0
code_puppy/agents/base_agent.py
CHANGED
|
@@ -1,8 +1,87 @@
|
|
|
1
1
|
"""Base agent configuration class for defining agent properties."""
|
|
2
2
|
|
|
3
|
+
import asyncio
|
|
4
|
+
import json
|
|
5
|
+
import math
|
|
6
|
+
import signal
|
|
7
|
+
import threading
|
|
3
8
|
import uuid
|
|
4
9
|
from abc import ABC, abstractmethod
|
|
5
|
-
from typing import
|
|
10
|
+
from typing import (
|
|
11
|
+
Any,
|
|
12
|
+
Callable,
|
|
13
|
+
Dict,
|
|
14
|
+
List,
|
|
15
|
+
Optional,
|
|
16
|
+
Sequence,
|
|
17
|
+
Set,
|
|
18
|
+
Tuple,
|
|
19
|
+
Type,
|
|
20
|
+
Union,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
import mcp
|
|
24
|
+
import pydantic
|
|
25
|
+
import pydantic_ai.models
|
|
26
|
+
from dbos import DBOS, SetWorkflowID
|
|
27
|
+
from pydantic_ai import Agent as PydanticAgent
|
|
28
|
+
from pydantic_ai import (
|
|
29
|
+
BinaryContent,
|
|
30
|
+
DocumentUrl,
|
|
31
|
+
ImageUrl,
|
|
32
|
+
RunContext,
|
|
33
|
+
UsageLimitExceeded,
|
|
34
|
+
UsageLimits,
|
|
35
|
+
)
|
|
36
|
+
from pydantic_ai.durable_exec.dbos import DBOSAgent
|
|
37
|
+
from pydantic_ai.messages import (
|
|
38
|
+
ModelMessage,
|
|
39
|
+
ModelRequest,
|
|
40
|
+
TextPart,
|
|
41
|
+
ThinkingPart,
|
|
42
|
+
ToolCallPart,
|
|
43
|
+
ToolCallPartDelta,
|
|
44
|
+
ToolReturn,
|
|
45
|
+
ToolReturnPart,
|
|
46
|
+
)
|
|
47
|
+
from rich.text import Text
|
|
48
|
+
|
|
49
|
+
from code_puppy.agents.event_stream_handler import event_stream_handler
|
|
50
|
+
|
|
51
|
+
# Consolidated relative imports
|
|
52
|
+
from code_puppy.config import (
|
|
53
|
+
get_agent_pinned_model,
|
|
54
|
+
get_compaction_strategy,
|
|
55
|
+
get_compaction_threshold,
|
|
56
|
+
get_global_model_name,
|
|
57
|
+
get_message_limit,
|
|
58
|
+
get_protected_token_count,
|
|
59
|
+
get_use_dbos,
|
|
60
|
+
get_value,
|
|
61
|
+
)
|
|
62
|
+
from code_puppy.error_logging import log_error
|
|
63
|
+
from code_puppy.keymap import cancel_agent_uses_signal, get_cancel_agent_char_code
|
|
64
|
+
from code_puppy.mcp_ import get_mcp_manager
|
|
65
|
+
from code_puppy.messaging import (
|
|
66
|
+
emit_error,
|
|
67
|
+
emit_info,
|
|
68
|
+
emit_warning,
|
|
69
|
+
)
|
|
70
|
+
from code_puppy.messaging.spinner import (
|
|
71
|
+
SpinnerBase,
|
|
72
|
+
update_spinner_context,
|
|
73
|
+
)
|
|
74
|
+
from code_puppy.model_factory import ModelFactory, make_model_settings
|
|
75
|
+
from code_puppy.summarization_agent import run_summarization_sync
|
|
76
|
+
from code_puppy.tools.agent_tools import _active_subagent_tasks
|
|
77
|
+
from code_puppy.tools.command_runner import (
|
|
78
|
+
is_awaiting_user_input,
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
# Global flag to track delayed compaction requests
|
|
82
|
+
_delayed_compaction_requested = False
|
|
83
|
+
|
|
84
|
+
_reload_count = 0
|
|
6
85
|
|
|
7
86
|
|
|
8
87
|
class BaseAgent(ABC):
|
|
@@ -12,6 +91,15 @@ class BaseAgent(ABC):
|
|
|
12
91
|
self.id = str(uuid.uuid4())
|
|
13
92
|
self._message_history: List[Any] = []
|
|
14
93
|
self._compacted_message_hashes: Set[str] = set()
|
|
94
|
+
# Agent construction cache
|
|
95
|
+
self._code_generation_agent = None
|
|
96
|
+
self._last_model_name: Optional[str] = None
|
|
97
|
+
# Puppy rules loaded lazily
|
|
98
|
+
self._puppy_rules: Optional[str] = None
|
|
99
|
+
self.cur_model: pydantic_ai.models.Model
|
|
100
|
+
# Cache for MCP tool definitions (for token estimation)
|
|
101
|
+
# This is populated after the first successful run when MCP tools are retrieved
|
|
102
|
+
self._mcp_tool_definitions_cache: List[Dict[str, Any]] = []
|
|
15
103
|
|
|
16
104
|
@property
|
|
17
105
|
@abstractmethod
|
|
@@ -114,3 +202,1627 @@ class BaseAgent(ABC):
|
|
|
114
202
|
message_hash: Hash of a message that has been compacted/summarized.
|
|
115
203
|
"""
|
|
116
204
|
self._compacted_message_hashes.add(message_hash)
|
|
205
|
+
|
|
206
|
+
def get_model_name(self) -> Optional[str]:
|
|
207
|
+
"""Get pinned model name for this agent, if specified.
|
|
208
|
+
|
|
209
|
+
Returns:
|
|
210
|
+
Model name to use for this agent, or global default if none pinned.
|
|
211
|
+
"""
|
|
212
|
+
pinned = get_agent_pinned_model(self.name)
|
|
213
|
+
if pinned == "" or pinned is None:
|
|
214
|
+
return get_global_model_name()
|
|
215
|
+
return pinned
|
|
216
|
+
|
|
217
|
+
def _clean_binaries(self, messages: List[ModelMessage]) -> List[ModelMessage]:
|
|
218
|
+
cleaned = []
|
|
219
|
+
for message in messages:
|
|
220
|
+
parts = []
|
|
221
|
+
for part in message.parts:
|
|
222
|
+
if hasattr(part, "content") and isinstance(part.content, list):
|
|
223
|
+
content = []
|
|
224
|
+
for item in part.content:
|
|
225
|
+
if not isinstance(item, BinaryContent):
|
|
226
|
+
content.append(item)
|
|
227
|
+
part.content = content
|
|
228
|
+
parts.append(part)
|
|
229
|
+
cleaned.append(message)
|
|
230
|
+
return cleaned
|
|
231
|
+
|
|
232
|
+
# Message history processing methods (moved from state_management.py and message_history_processor.py)
|
|
233
|
+
def _stringify_part(self, part: Any) -> str:
|
|
234
|
+
"""Create a stable string representation for a message part.
|
|
235
|
+
|
|
236
|
+
We deliberately ignore timestamps so identical content hashes the same even when
|
|
237
|
+
emitted at different times. This prevents status updates from blowing up the
|
|
238
|
+
history when they are repeated with new timestamps."""
|
|
239
|
+
|
|
240
|
+
attributes: List[str] = [part.__class__.__name__]
|
|
241
|
+
|
|
242
|
+
# Role/instructions help disambiguate parts that otherwise share content
|
|
243
|
+
if hasattr(part, "role") and part.role:
|
|
244
|
+
attributes.append(f"role={part.role}")
|
|
245
|
+
if hasattr(part, "instructions") and part.instructions:
|
|
246
|
+
attributes.append(f"instructions={part.instructions}")
|
|
247
|
+
|
|
248
|
+
if hasattr(part, "tool_call_id") and part.tool_call_id:
|
|
249
|
+
attributes.append(f"tool_call_id={part.tool_call_id}")
|
|
250
|
+
|
|
251
|
+
if hasattr(part, "tool_name") and part.tool_name:
|
|
252
|
+
attributes.append(f"tool_name={part.tool_name}")
|
|
253
|
+
|
|
254
|
+
content = getattr(part, "content", None)
|
|
255
|
+
if content is None:
|
|
256
|
+
attributes.append("content=None")
|
|
257
|
+
elif isinstance(content, str):
|
|
258
|
+
attributes.append(f"content={content}")
|
|
259
|
+
elif isinstance(content, pydantic.BaseModel):
|
|
260
|
+
attributes.append(
|
|
261
|
+
f"content={json.dumps(content.model_dump(), sort_keys=True)}"
|
|
262
|
+
)
|
|
263
|
+
elif isinstance(content, dict):
|
|
264
|
+
attributes.append(f"content={json.dumps(content, sort_keys=True)}")
|
|
265
|
+
elif isinstance(content, list):
|
|
266
|
+
for item in content:
|
|
267
|
+
if isinstance(item, str):
|
|
268
|
+
attributes.append(f"content={item}")
|
|
269
|
+
if isinstance(item, BinaryContent):
|
|
270
|
+
attributes.append(f"BinaryContent={hash(item.data)}")
|
|
271
|
+
else:
|
|
272
|
+
attributes.append(f"content={repr(content)}")
|
|
273
|
+
result = "|".join(attributes)
|
|
274
|
+
return result
|
|
275
|
+
|
|
276
|
+
def hash_message(self, message: Any) -> int:
|
|
277
|
+
"""Create a stable hash for a model message that ignores timestamps."""
|
|
278
|
+
role = getattr(message, "role", None)
|
|
279
|
+
instructions = getattr(message, "instructions", None)
|
|
280
|
+
header_bits: List[str] = []
|
|
281
|
+
if role:
|
|
282
|
+
header_bits.append(f"role={role}")
|
|
283
|
+
if instructions:
|
|
284
|
+
header_bits.append(f"instructions={instructions}")
|
|
285
|
+
|
|
286
|
+
part_strings = [
|
|
287
|
+
self._stringify_part(part) for part in getattr(message, "parts", [])
|
|
288
|
+
]
|
|
289
|
+
canonical = "||".join(header_bits + part_strings)
|
|
290
|
+
return hash(canonical)
|
|
291
|
+
|
|
292
|
+
def stringify_message_part(self, part) -> str:
|
|
293
|
+
"""
|
|
294
|
+
Convert a message part to a string representation for token estimation or other uses.
|
|
295
|
+
|
|
296
|
+
Args:
|
|
297
|
+
part: A message part that may contain content or be a tool call
|
|
298
|
+
|
|
299
|
+
Returns:
|
|
300
|
+
String representation of the message part
|
|
301
|
+
"""
|
|
302
|
+
result = ""
|
|
303
|
+
if hasattr(part, "part_kind"):
|
|
304
|
+
result += part.part_kind + ": "
|
|
305
|
+
else:
|
|
306
|
+
result += str(type(part)) + ": "
|
|
307
|
+
|
|
308
|
+
# Handle content
|
|
309
|
+
if hasattr(part, "content") and part.content:
|
|
310
|
+
# Handle different content types
|
|
311
|
+
if isinstance(part.content, str):
|
|
312
|
+
result = part.content
|
|
313
|
+
elif isinstance(part.content, pydantic.BaseModel):
|
|
314
|
+
result = json.dumps(part.content.model_dump())
|
|
315
|
+
elif isinstance(part.content, dict):
|
|
316
|
+
result = json.dumps(part.content)
|
|
317
|
+
elif isinstance(part.content, list):
|
|
318
|
+
result = ""
|
|
319
|
+
for item in part.content:
|
|
320
|
+
if isinstance(item, str):
|
|
321
|
+
result += item + "\n"
|
|
322
|
+
if isinstance(item, BinaryContent):
|
|
323
|
+
result += f"BinaryContent={hash(item.data)}\n"
|
|
324
|
+
else:
|
|
325
|
+
result = str(part.content)
|
|
326
|
+
|
|
327
|
+
# Handle tool calls which may have additional token costs
|
|
328
|
+
# If part also has content, we'll process tool calls separately
|
|
329
|
+
if hasattr(part, "tool_name") and part.tool_name:
|
|
330
|
+
# Estimate tokens for tool name and parameters
|
|
331
|
+
tool_text = part.tool_name
|
|
332
|
+
if hasattr(part, "args"):
|
|
333
|
+
tool_text += f" {str(part.args)}"
|
|
334
|
+
result += tool_text
|
|
335
|
+
|
|
336
|
+
return result
|
|
337
|
+
|
|
338
|
+
def estimate_token_count(self, text: str) -> int:
|
|
339
|
+
"""
|
|
340
|
+
Simple token estimation using len(message) / 3.
|
|
341
|
+
This replaces tiktoken with a much simpler approach.
|
|
342
|
+
"""
|
|
343
|
+
return max(1, math.floor((len(text) / 3)))
|
|
344
|
+
|
|
345
|
+
def estimate_tokens_for_message(self, message: ModelMessage) -> int:
|
|
346
|
+
"""
|
|
347
|
+
Estimate the number of tokens in a message using len(message)
|
|
348
|
+
Simple and fast replacement for tiktoken.
|
|
349
|
+
"""
|
|
350
|
+
total_tokens = 0
|
|
351
|
+
|
|
352
|
+
for part in message.parts:
|
|
353
|
+
part_str = self.stringify_message_part(part)
|
|
354
|
+
if part_str:
|
|
355
|
+
total_tokens += self.estimate_token_count(part_str)
|
|
356
|
+
|
|
357
|
+
return max(1, total_tokens)
|
|
358
|
+
|
|
359
|
+
def estimate_context_overhead_tokens(self) -> int:
|
|
360
|
+
"""
|
|
361
|
+
Estimate the token overhead from system prompt and tool definitions.
|
|
362
|
+
|
|
363
|
+
This accounts for tokens that are always present in the context:
|
|
364
|
+
- System prompt (for non-Claude-Code models)
|
|
365
|
+
- Tool definitions (name, description, parameter schema)
|
|
366
|
+
- MCP tool definitions
|
|
367
|
+
|
|
368
|
+
Note: For Claude Code models, the system prompt is prepended to the first
|
|
369
|
+
user message, so it's already counted in the message history tokens.
|
|
370
|
+
We only count the short fixed instructions for Claude Code models.
|
|
371
|
+
"""
|
|
372
|
+
total_tokens = 0
|
|
373
|
+
|
|
374
|
+
# 1. Estimate tokens for system prompt / instructions
|
|
375
|
+
# For Claude Code models, the full system prompt is prepended to the first
|
|
376
|
+
# user message (already in message history), so we only count the short
|
|
377
|
+
# fixed instructions. For other models, count the full system prompt.
|
|
378
|
+
try:
|
|
379
|
+
from code_puppy.model_utils import (
|
|
380
|
+
get_antigravity_instructions,
|
|
381
|
+
get_chatgpt_codex_instructions,
|
|
382
|
+
get_claude_code_instructions,
|
|
383
|
+
is_antigravity_model,
|
|
384
|
+
is_chatgpt_codex_model,
|
|
385
|
+
is_claude_code_model,
|
|
386
|
+
)
|
|
387
|
+
|
|
388
|
+
model_name = (
|
|
389
|
+
self.get_model_name() if hasattr(self, "get_model_name") else ""
|
|
390
|
+
)
|
|
391
|
+
if is_claude_code_model(model_name):
|
|
392
|
+
# For Claude Code models, only count the short fixed instructions
|
|
393
|
+
# The full system prompt is already in the message history
|
|
394
|
+
instructions = get_claude_code_instructions()
|
|
395
|
+
total_tokens += self.estimate_token_count(instructions)
|
|
396
|
+
elif is_chatgpt_codex_model(model_name):
|
|
397
|
+
# For ChatGPT Codex models, only count the short fixed instructions
|
|
398
|
+
# The full system prompt is already in the message history
|
|
399
|
+
instructions = get_chatgpt_codex_instructions()
|
|
400
|
+
total_tokens += self.estimate_token_count(instructions)
|
|
401
|
+
elif is_antigravity_model(model_name):
|
|
402
|
+
# For Antigravity models, only count the short fixed instructions
|
|
403
|
+
# The full system prompt is already in the message history
|
|
404
|
+
instructions = get_antigravity_instructions()
|
|
405
|
+
total_tokens += self.estimate_token_count(instructions)
|
|
406
|
+
else:
|
|
407
|
+
# For other models, count the full system prompt
|
|
408
|
+
system_prompt = self.get_system_prompt()
|
|
409
|
+
if system_prompt:
|
|
410
|
+
total_tokens += self.estimate_token_count(system_prompt)
|
|
411
|
+
except Exception:
|
|
412
|
+
pass # If we can't get system prompt, skip it
|
|
413
|
+
|
|
414
|
+
# 2. Estimate tokens for pydantic_agent tool definitions
|
|
415
|
+
pydantic_agent = getattr(self, "pydantic_agent", None)
|
|
416
|
+
if pydantic_agent:
|
|
417
|
+
tools = getattr(pydantic_agent, "_tools", None)
|
|
418
|
+
if tools and isinstance(tools, dict):
|
|
419
|
+
for tool_name, tool_func in tools.items():
|
|
420
|
+
try:
|
|
421
|
+
# Estimate tokens from tool name
|
|
422
|
+
total_tokens += self.estimate_token_count(tool_name)
|
|
423
|
+
|
|
424
|
+
# Estimate tokens from tool description
|
|
425
|
+
description = getattr(tool_func, "__doc__", None) or ""
|
|
426
|
+
if description:
|
|
427
|
+
total_tokens += self.estimate_token_count(description)
|
|
428
|
+
|
|
429
|
+
# Estimate tokens from parameter schema
|
|
430
|
+
# Tools may have a schema attribute or we can try to get it from annotations
|
|
431
|
+
schema = getattr(tool_func, "schema", None)
|
|
432
|
+
if schema:
|
|
433
|
+
schema_str = (
|
|
434
|
+
json.dumps(schema)
|
|
435
|
+
if isinstance(schema, dict)
|
|
436
|
+
else str(schema)
|
|
437
|
+
)
|
|
438
|
+
total_tokens += self.estimate_token_count(schema_str)
|
|
439
|
+
else:
|
|
440
|
+
# Try to get schema from function annotations
|
|
441
|
+
annotations = getattr(tool_func, "__annotations__", None)
|
|
442
|
+
if annotations:
|
|
443
|
+
total_tokens += self.estimate_token_count(
|
|
444
|
+
str(annotations)
|
|
445
|
+
)
|
|
446
|
+
except Exception:
|
|
447
|
+
continue # Skip tools we can't process
|
|
448
|
+
|
|
449
|
+
# 3. Estimate tokens for MCP tool definitions from cache
|
|
450
|
+
# MCP tools are fetched asynchronously, so we use a cache that's populated
|
|
451
|
+
# after the first successful run. See _update_mcp_tool_cache() method.
|
|
452
|
+
mcp_tool_cache = getattr(self, "_mcp_tool_definitions_cache", [])
|
|
453
|
+
if mcp_tool_cache:
|
|
454
|
+
for tool_def in mcp_tool_cache:
|
|
455
|
+
try:
|
|
456
|
+
# Estimate tokens from tool name
|
|
457
|
+
tool_name = tool_def.get("name", "")
|
|
458
|
+
if tool_name:
|
|
459
|
+
total_tokens += self.estimate_token_count(tool_name)
|
|
460
|
+
|
|
461
|
+
# Estimate tokens from tool description
|
|
462
|
+
description = tool_def.get("description", "")
|
|
463
|
+
if description:
|
|
464
|
+
total_tokens += self.estimate_token_count(description)
|
|
465
|
+
|
|
466
|
+
# Estimate tokens from parameter schema (inputSchema)
|
|
467
|
+
input_schema = tool_def.get("inputSchema")
|
|
468
|
+
if input_schema:
|
|
469
|
+
schema_str = (
|
|
470
|
+
json.dumps(input_schema)
|
|
471
|
+
if isinstance(input_schema, dict)
|
|
472
|
+
else str(input_schema)
|
|
473
|
+
)
|
|
474
|
+
total_tokens += self.estimate_token_count(schema_str)
|
|
475
|
+
except Exception:
|
|
476
|
+
continue # Skip tools we can't process
|
|
477
|
+
|
|
478
|
+
return total_tokens
|
|
479
|
+
|
|
480
|
+
async def _update_mcp_tool_cache(self) -> None:
|
|
481
|
+
"""
|
|
482
|
+
Update the MCP tool definitions cache by fetching tools from running MCP servers.
|
|
483
|
+
|
|
484
|
+
This should be called after a successful run to populate the cache for
|
|
485
|
+
accurate token estimation in subsequent runs.
|
|
486
|
+
"""
|
|
487
|
+
mcp_servers = getattr(self, "_mcp_servers", None)
|
|
488
|
+
if not mcp_servers:
|
|
489
|
+
return
|
|
490
|
+
|
|
491
|
+
tool_definitions = []
|
|
492
|
+
for mcp_server in mcp_servers:
|
|
493
|
+
try:
|
|
494
|
+
# Check if the server has list_tools method (pydantic-ai MCP servers)
|
|
495
|
+
if hasattr(mcp_server, "list_tools"):
|
|
496
|
+
# list_tools() returns list[mcp_types.Tool]
|
|
497
|
+
tools = await mcp_server.list_tools()
|
|
498
|
+
for tool in tools:
|
|
499
|
+
tool_def = {
|
|
500
|
+
"name": getattr(tool, "name", ""),
|
|
501
|
+
"description": getattr(tool, "description", ""),
|
|
502
|
+
"inputSchema": getattr(tool, "inputSchema", {}),
|
|
503
|
+
}
|
|
504
|
+
tool_definitions.append(tool_def)
|
|
505
|
+
except Exception:
|
|
506
|
+
# Server might not be running or accessible, skip it
|
|
507
|
+
continue
|
|
508
|
+
|
|
509
|
+
self._mcp_tool_definitions_cache = tool_definitions
|
|
510
|
+
|
|
511
|
+
def update_mcp_tool_cache_sync(self) -> None:
|
|
512
|
+
"""
|
|
513
|
+
Synchronously clear the MCP tool cache.
|
|
514
|
+
|
|
515
|
+
This clears the cache so that token counts will be recalculated on the next
|
|
516
|
+
agent run. Call this after starting/stopping MCP servers.
|
|
517
|
+
|
|
518
|
+
Note: We don't try to fetch tools synchronously because MCP servers require
|
|
519
|
+
async context management that doesn't work well from sync code. The cache
|
|
520
|
+
will be repopulated on the next successful agent run.
|
|
521
|
+
"""
|
|
522
|
+
# Simply clear the cache - it will be repopulated on the next agent run
|
|
523
|
+
# This is safer than trying to call async methods from sync context
|
|
524
|
+
self._mcp_tool_definitions_cache = []
|
|
525
|
+
|
|
526
|
+
def _is_tool_call_part(self, part: Any) -> bool:
|
|
527
|
+
if isinstance(part, (ToolCallPart, ToolCallPartDelta)):
|
|
528
|
+
return True
|
|
529
|
+
|
|
530
|
+
part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
|
|
531
|
+
if part_kind == "tool-call":
|
|
532
|
+
return True
|
|
533
|
+
|
|
534
|
+
has_tool_name = getattr(part, "tool_name", None) is not None
|
|
535
|
+
has_args = getattr(part, "args", None) is not None
|
|
536
|
+
has_args_delta = getattr(part, "args_delta", None) is not None
|
|
537
|
+
|
|
538
|
+
return bool(has_tool_name and (has_args or has_args_delta))
|
|
539
|
+
|
|
540
|
+
def _is_tool_return_part(self, part: Any) -> bool:
|
|
541
|
+
if isinstance(part, (ToolReturnPart, ToolReturn)):
|
|
542
|
+
return True
|
|
543
|
+
|
|
544
|
+
part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
|
|
545
|
+
if part_kind in {"tool-return", "tool-result"}:
|
|
546
|
+
return True
|
|
547
|
+
|
|
548
|
+
if getattr(part, "tool_call_id", None) is None:
|
|
549
|
+
return False
|
|
550
|
+
|
|
551
|
+
has_content = getattr(part, "content", None) is not None
|
|
552
|
+
has_content_delta = getattr(part, "content_delta", None) is not None
|
|
553
|
+
return bool(has_content or has_content_delta)
|
|
554
|
+
|
|
555
|
+
def filter_huge_messages(self, messages: List[ModelMessage]) -> List[ModelMessage]:
    """Drop any message estimated at 50k tokens or more, then prune the result.

    Removing a huge message can orphan a tool call/return pair, so the
    survivors are passed through prune_interrupted_tool_calls before return.
    """
    return self.prune_interrupted_tool_calls(
        [msg for msg in messages if self.estimate_tokens_for_message(msg) < 50000]
    )
def split_messages_for_protected_summarization(
    self,
    messages: List[ModelMessage],
) -> Tuple[List[ModelMessage], List[ModelMessage]]:
    """
    Split messages into two groups: messages to summarize and protected recent messages.

    Returns:
        Tuple of (messages_to_summarize, protected_messages)

    The protected_messages are the most recent messages that total up to the configured protected token count.
    The system message (first message) is always protected.
    All other messages that don't fit in the protected zone will be summarized.
    """
    if len(messages) <= 1:  # Just system message or empty
        return [], messages

    # Always protect the system message (first message)
    system_message = messages[0]
    # The system message's tokens count against the protected budget below.
    system_tokens = self.estimate_tokens_for_message(system_message)

    # NOTE(review): unreachable - the len(messages) <= 1 guard above already
    # returned for this case.
    if len(messages) == 1:
        return [], messages

    # Get the configured protected token count
    protected_tokens_limit = get_protected_token_count()

    # Calculate tokens for messages from most recent backwards (excluding system message)
    protected_messages = []
    protected_token_count = system_tokens  # Start with system message tokens

    # Go backwards through non-system messages to find protected zone
    for i in range(
        len(messages) - 1, 0, -1
    ):  # Stop at 1, not 0 (skip system message)
        message = messages[i]
        message_tokens = self.estimate_tokens_for_message(message)

        # If adding this message would exceed protected tokens, stop here
        if protected_token_count + message_tokens > protected_tokens_limit:
            break

        protected_messages.append(message)
        protected_token_count += message_tokens

    # Messages that were added while scanning backwards are currently in reverse order.
    # Reverse them to restore chronological ordering, then prepend the system prompt.
    protected_messages.reverse()
    protected_messages.insert(0, system_message)

    # Messages to summarize are everything between the system message and the
    # protected tail zone we just constructed.
    # len(protected_messages) - 1 is the size of the protected tail (the
    # system message at index 0 is excluded from the tail count).
    protected_start_idx = max(1, len(messages) - (len(protected_messages) - 1))
    messages_to_summarize = messages[1:protected_start_idx]

    # Emit info messages
    emit_info(
        f"🔒 Protecting {len(protected_messages)} recent messages ({protected_token_count} tokens, limit: {protected_tokens_limit})"
    )
    emit_info(f"📝 Summarizing {len(messages_to_summarize)} older messages")

    return messages_to_summarize, protected_messages
def summarize_messages(
    self, messages: List[ModelMessage], with_protection: bool = True
) -> Tuple[List[ModelMessage], List[ModelMessage]]:
    """
    Summarize messages while protecting recent messages up to PROTECTED_TOKENS.

    Args:
        messages: Full message history; the first entry is the system message.
        with_protection: When True, keep a recent "protected" tail intact and
            summarize only the older middle; when False, summarize everything
            after the system message.

    Returns:
        Tuple of (compacted_messages, summarized_source_messages)
        where compacted_messages always preserves the original system message
        as the first entry.
    """
    messages_to_summarize: List[ModelMessage]
    protected_messages: List[ModelMessage]

    if with_protection:
        messages_to_summarize, protected_messages = (
            self.split_messages_for_protected_summarization(messages)
        )
    else:
        messages_to_summarize = messages[1:] if messages else []
        protected_messages = messages[:1]

    # Guard placed after the split on purpose: the split helpers handle
    # empty input safely, and this keeps the early-exit in one place.
    if not messages:
        return [], []

    system_message = messages[0]

    if not messages_to_summarize:
        # Nothing to summarize, so just return the original sequence
        return self.prune_interrupted_tool_calls(messages), []

    instructions = (
        "The input will be a log of Agentic AI steps that have been taken"
        " as well as user queries, etc. Summarize the contents of these steps."
        " The high level details should remain but the bulk of the content from tool-call"
        " responses should be compacted and summarized. For example if you see a tool-call"
        " reading a file, and the file contents are large, then in your summary you might just"
        " write: * used read_file on space_invaders.cpp - contents removed."
        "\n Make sure your result is a bulleted list of all steps and interactions."
        "\n\nNOTE: This summary represents older conversation history. Recent messages are preserved separately."
    )

    try:
        # NOTE(review): this resolves to the module-level helper, not
        # self.run_summarization_sync, so subclass overrides of the method
        # are bypassed here - confirm whether that is intentional.
        new_messages = run_summarization_sync(
            instructions, message_history=messages_to_summarize
        )

        if not isinstance(new_messages, list):
            emit_warning(
                "Summarization agent returned non-list output; wrapping into message request"
            )
            new_messages = [ModelRequest([TextPart(str(new_messages))])]

        compacted: List[ModelMessage] = [system_message] + list(new_messages)

        # Drop the system message from protected_messages because we already included it
        protected_tail = [
            msg for msg in protected_messages if msg is not system_message
        ]

        compacted.extend(protected_tail)

        return self.prune_interrupted_tool_calls(compacted), messages_to_summarize
    except Exception as e:
        emit_error(f"Summarization failed during compaction: {e}")
        return messages, []  # Return original messages on failure
def get_model_context_length(self) -> int:
    """Return the context window size for this agent's effective model.

    Honors a per-agent pinned model via ``self.get_model_name()`` (falls
    back to the global model when no pin is set). Any lookup failure
    yields a conservative 128k default instead of raising.
    """
    default_context = 128000
    try:
        config_for_model = ModelFactory.load_config().get(self.get_model_name(), {})
        return int(config_for_model.get("context_length", default_context))
    except Exception:
        # Never let a config/model lookup failure break status or compaction.
        return default_context
def has_pending_tool_calls(self, messages: List[ModelMessage]) -> bool:
    """
    Check if there are any pending tool calls in the message history.

    A pending tool call is one that has a ToolCallPart without a corresponding
    ToolReturnPart. This indicates the model is still waiting for tool execution.

    Args:
        messages: Message history to inspect.

    Returns:
        True if there are pending tool calls, False otherwise
    """
    # Delegate to get_pending_tool_call_count so the call/return matching
    # logic lives in exactly one place (the two bodies were duplicates).
    return self.get_pending_tool_call_count(messages) > 0
def request_delayed_compaction(self) -> None:
    """
    Request that compaction be attempted after the current tool calls complete.

    This sets a module-level flag that will be checked during the next message
    processing cycle (see should_attempt_delayed_compaction) to trigger
    compaction when it's safe to do so.
    """
    global _delayed_compaction_requested
    _delayed_compaction_requested = True
    emit_info(
        "🔄 Delayed compaction requested - will attempt after tool calls complete",
        message_group="token_context_status",
    )
def should_attempt_delayed_compaction(self) -> bool:
    """Return True when a delayed compaction is both requested and safe.

    "Safe" means the current message history has no tool calls still
    awaiting results. Consumes (resets) the request flag on success.
    """
    global _delayed_compaction_requested
    if not _delayed_compaction_requested:
        return False

    history = self.get_message_history()
    if self.has_pending_tool_calls(history):
        # Still waiting on tool results; keep the flag set for a later cycle.
        return False

    _delayed_compaction_requested = False  # Consume the one-shot request.
    return True
def get_pending_tool_call_count(self, messages: List[ModelMessage]) -> int:
    """Count tool calls that have no matching tool return yet.

    Intended for debugging; a nonzero count means the model is still
    waiting on tool execution results.
    """
    if not messages:
        return 0

    call_ids: Set[str] = set()
    return_ids: Set[str] = set()

    # Bucket every tool_call_id by whether it came from a call or a return.
    for message in messages:
        for part in getattr(message, "parts", []) or []:
            call_id = getattr(part, "tool_call_id", None)
            if not call_id:
                continue
            kind = part.part_kind
            if kind == "tool-call":
                call_ids.add(call_id)
            elif kind == "tool-return":
                return_ids.add(call_id)

    # Calls with no corresponding return are still pending.
    return len(call_ids - return_ids)
def prune_interrupted_tool_calls(
    self, messages: List[ModelMessage]
) -> List[ModelMessage]:
    """
    Remove any messages that participate in mismatched tool call sequences.

    A mismatched tool call id is one that appears in a ToolCall (model/tool request)
    without a corresponding tool return, or vice versa. We preserve original order
    and only drop messages that contain parts referencing mismatched tool_call_ids.

    Args:
        messages: Message history to prune.

    Returns:
        The history with interrupted tool-call conversations removed.
    """
    if not messages:
        return messages

    tool_call_ids: Set[str] = set()
    tool_return_ids: Set[str] = set()

    # First pass: collect ids for calls vs returns
    for msg in messages:
        for part in getattr(msg, "parts", []) or []:
            tool_call_id = getattr(part, "tool_call_id", None)
            if not tool_call_id:
                continue
            # Heuristic: explicit "tool-call" parts are calls; anything else
            # carrying a tool_call_id is treated as a return/result.
            if part.part_kind == "tool-call":
                tool_call_ids.add(tool_call_id)
            else:
                tool_return_ids.add(tool_call_id)

    # Ids present on only one side of the call/return pairing are mismatched.
    mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
    if not mismatched:
        return messages

    # Second pass: drop any message referencing a mismatched id.
    # (Previous version tallied a never-used `dropped_count`; removed.)
    pruned: List[ModelMessage] = []
    for msg in messages:
        references_mismatch = any(
            getattr(part, "tool_call_id", None) in mismatched
            for part in getattr(msg, "parts", []) or []
        )
        if not references_mismatch:
            pruned.append(msg)
    return pruned
def message_history_processor(
    self, ctx: RunContext, messages: List[ModelMessage]
) -> List[ModelMessage]:
    """Token-budget gatekeeper run on each model call.

    Updates the spinner with current context usage and, when usage exceeds
    the configured compaction threshold, compacts the history using the
    configured strategy (truncation or summarization).

    Args:
        ctx: The pydantic-ai run context (unused here, required by the
            history-processor signature).
        messages: Current message history.

    Returns:
        The (possibly compacted) message history.
    """
    model_max = self.get_model_context_length()

    message_tokens = sum(self.estimate_tokens_for_message(msg) for msg in messages)
    context_overhead = self.estimate_context_overhead_tokens()
    total_current_tokens = message_tokens + context_overhead
    proportion_used = total_current_tokens / model_max

    context_summary = SpinnerBase.format_context_info(
        total_current_tokens, model_max, proportion_used
    )
    update_spinner_context(context_summary)

    # Get the configured compaction threshold and strategy
    compaction_threshold = get_compaction_threshold()
    compaction_strategy = get_compaction_strategy()

    if proportion_used > compaction_threshold:
        # RACE CONDITION PROTECTION: don't summarize while tool calls are
        # still awaiting results - summarizing now would orphan them.
        if compaction_strategy == "summarization" and self.has_pending_tool_calls(
            messages
        ):
            pending_count = self.get_pending_tool_call_count(messages)
            emit_warning(
                f"⚠️ Summarization deferred: {pending_count} pending tool call(s) detected. "
                "Waiting for tool execution to complete before compaction.",
                message_group="token_context_status",
            )
            # Request delayed compaction for when tool calls complete
            self.request_delayed_compaction()
            # BUG FIX: this previously returned `messages, []` (a tuple),
            # violating the declared List[ModelMessage] return type that all
            # other paths honor.
            return messages

        if compaction_strategy == "truncation":
            # Use truncation instead of summarization
            protected_tokens = get_protected_token_count()
            result_messages = self.truncation(
                self.filter_huge_messages(messages), protected_tokens
            )
            summarized_messages = []  # No summarization in truncation mode
        else:
            # Default to summarization (safe to proceed - no pending tool calls)
            result_messages, summarized_messages = self.summarize_messages(
                self.filter_huge_messages(messages)
            )

        final_token_count = sum(
            self.estimate_tokens_for_message(msg) for msg in result_messages
        )
        # Update spinner with final token count
        final_summary = SpinnerBase.format_context_info(
            final_token_count, model_max, final_token_count / model_max
        )
        update_spinner_context(final_summary)

        self.set_message_history(result_messages)
        # Record hashes of messages folded into the summary so they are not
        # re-ingested on later runs.
        for m in summarized_messages:
            self.add_compacted_message_hash(self.hash_message(m))
        return result_messages
    return messages
def truncation(
    self, messages: List[ModelMessage], protected_tokens: int
) -> List[ModelMessage]:
    """
    Truncate message history to manage token usage.

    Protects:
    - The first message (system prompt) - always kept
    - The second message if it contains a ThinkingPart (extended thinking context)
    - The most recent messages up to protected_tokens

    Args:
        messages: List of messages to truncate
        protected_tokens: Number of tokens to protect

    Returns:
        Truncated list of messages
    """
    emit_info("Truncating message history to manage token usage")
    result = [messages[0]]  # Always keep the first message (system prompt)

    # Check if second message exists and contains a ThinkingPart
    # If so, protect it (extended thinking context shouldn't be lost)
    skip_second = False
    if len(messages) > 1:
        second_msg = messages[1]
        if any(isinstance(part, ThinkingPart) for part in second_msg.parts):
            result.append(second_msg)
            skip_second = True

    # Scan the remaining messages newest-first, keeping as many recent
    # messages as fit inside the protected token budget. A plain list
    # replaces the previous queue.LifoQueue: there is no concurrency here,
    # so the thread-safe queue (and its local import) was needless overhead.
    start_idx = 2 if skip_second else 1
    recent: List[ModelMessage] = []
    num_tokens = 0
    for msg in reversed(messages[start_idx:]):
        num_tokens += self.estimate_tokens_for_message(msg)
        if num_tokens > protected_tokens:
            break
        recent.append(msg)

    # `recent` was collected newest-first; restore chronological order.
    result.extend(reversed(recent))

    # Truncation can orphan tool call/return pairs; prune them.
    return self.prune_interrupted_tool_calls(result)
def run_summarization_sync(
    self,
    instructions: str,
    message_history: List[ModelMessage],
) -> Union[List[ModelMessage], str]:
    """
    Run summarization synchronously using the configured summarization agent.
    This is exposed as a method so it can be overridden by subclasses if needed.

    Args:
        instructions: Instructions for the summarization agent
        message_history: List of messages to summarize

    Returns:
        Summarized messages or text
    """
    # The unqualified name resolves to the module-level helper (Python does
    # not search the enclosing class scope from inside a method body), so
    # this is delegation, not recursion.
    return run_summarization_sync(instructions, message_history)
# ===== Agent wiring formerly in code_puppy/agent.py =====
|
|
991
|
+
def load_puppy_rules(self) -> Optional[str]:
    """Load AGENT(S).md from both global config and project directory.

    Looks for AGENTS.md/AGENT.md/agents.md/agent.md (in that order) in:
    1. The global config directory (~/.code_puppy/ or XDG config)
    2. The current working directory (project-specific)

    When both exist they are concatenated global-first, letting
    project-specific rules override or extend the global ones. The result
    is cached on the instance.
    """
    if self._puppy_rules is not None:
        return self._puppy_rules

    from pathlib import Path

    from code_puppy.config import CONFIG_DIR

    candidate_names = ["AGENTS.md", "AGENT.md", "agents.md", "agent.md"]

    def _first_rules_file(base: Optional[Path]) -> Optional[str]:
        # Return the contents of the first rules file found under *base*,
        # or relative to the cwd when base is None.
        for name in candidate_names:
            candidate = base / name if base is not None else Path(name)
            if candidate.exists():
                return candidate.read_text(encoding="utf-8-sig")
        return None

    global_rules = _first_rules_file(Path(CONFIG_DIR))
    project_rules = _first_rules_file(None)

    # Global first, project second, so project rules win on conflict.
    chunks = [text for text in (global_rules, project_rules) if text]
    self._puppy_rules = "\n\n".join(chunks) if chunks else None
    return self._puppy_rules
def load_mcp_servers(self, extra_headers: Optional[Dict[str, str]] = None):
    """Load MCP servers through the manager and return pydantic-ai compatible servers.

    Returns an empty list when the `disable_mcp_servers` setting is truthy.

    Note: The manager automatically syncs from mcp_servers.json during
    initialization, so no sync happens here; use reload_mcp_servers() to
    force a re-sync.
    """
    disabled_flag = get_value("disable_mcp_servers")
    if disabled_flag and str(disabled_flag).lower() in ("1", "true", "yes", "on"):
        return []

    return get_mcp_manager().get_servers_for_agent()
def reload_mcp_servers(self):
    """Force a re-sync from mcp_servers.json and return the updated servers.

    Also clears the MCP tool-definition cache so token counts are
    recalculated against the new server set.
    """
    # Invalidate cached MCP tool definitions before re-syncing.
    self._mcp_tool_definitions_cache = []

    manager = get_mcp_manager()
    manager.sync_from_config()  # Pick up any configuration changes on disk.
    return manager.get_servers_for_agent()
def _load_model_with_fallback(
    self,
    requested_model_name: str,
    models_config: Dict[str, Any],
    message_group: str,
) -> Tuple[Any, str]:
    """Load the requested model, applying a friendly fallback when unavailable.

    Fallback order: the globally-configured model first, then every other
    configured model. Raises ValueError when no candidate loads.
    """
    try:
        loaded = ModelFactory.get_model(requested_model_name, models_config)
        return loaded, requested_model_name
    except ValueError as exc:
        known_models = list(models_config.keys())
        available_str = (
            ", ".join(sorted(known_models))
            if known_models
            else "no configured models"
        )
        emit_warning(
            (
                f"Model '{requested_model_name}' not found. "
                f"Available models: {available_str}"
            ),
            message_group=message_group,
        )

        # Build the candidate list: global model first, then the rest,
        # deduplicated while preserving order.
        candidates: List[str] = []
        global_candidate = get_global_model_name()
        if global_candidate:
            candidates.append(global_candidate)
        for name in known_models:
            if name not in candidates:
                candidates.append(name)

        for candidate in candidates:
            if not candidate or candidate == requested_model_name:
                continue
            try:
                fallback_model = ModelFactory.get_model(candidate, models_config)
            except ValueError:
                continue
            emit_info(
                f"Using fallback model: {candidate}",
                message_group=message_group,
            )
            return fallback_model, candidate

        friendly_message = (
            "No valid model could be loaded. Update the model configuration or set "
            "a valid model with `config set`."
        )
        emit_error(
            friendly_message,
            message_group=message_group,
        )
        raise ValueError(friendly_message) from exc
def reload_code_generation_agent(self, message_group: Optional[str] = None):
    """Force-reload the pydantic-ai Agent based on current config and model.

    Resolves the effective model (with fallback), assembles instructions
    (system prompt + AGENTS.md rules), filters MCP toolsets whose tool names
    conflict with built-in tools, and wires the agent either through DBOS or
    directly.

    Args:
        message_group: Optional message-group id for emitted UI messages;
            a fresh UUID is generated when omitted.

    Returns:
        The newly constructed agent (DBOSAgent or PydanticAgent).
    """
    from code_puppy.tools import register_tools_for_agent

    if message_group is None:
        message_group = str(uuid.uuid4())

    model_name = self.get_model_name()

    models_config = ModelFactory.load_config()
    model, resolved_model_name = self._load_model_with_fallback(
        model_name,
        models_config,
        message_group,
    )

    instructions = self.get_system_prompt()
    puppy_rules = self.load_puppy_rules()
    if puppy_rules:
        instructions += f"\n{puppy_rules}"

    mcp_servers = self.load_mcp_servers()

    model_settings = make_model_settings(resolved_model_name)

    # Handle claude-code models: swap instructions (prompt prepending happens in run_with_mcp)
    from code_puppy.model_utils import prepare_prompt_for_model

    prepared = prepare_prompt_for_model(
        model_name, instructions, "", prepend_system_to_user=False
    )
    instructions = prepared.instructions

    self.cur_model = model
    # Provisional agent built to discover built-in tool names; the final
    # agent is constructed below once MCP conflicts have been filtered.
    p_agent = PydanticAgent(
        model=model,
        instructions=instructions,
        output_type=str,
        retries=3,
        toolsets=mcp_servers,
        history_processors=[self.message_history_accumulator],
        model_settings=model_settings,
    )

    agent_tools = self.get_available_tools()
    register_tools_for_agent(p_agent, agent_tools)

    # Get existing tool names to filter out conflicts with MCP tools
    existing_tool_names = set()
    try:
        tools = getattr(p_agent, "_tools", None)
        if tools:
            existing_tool_names = set(tools.keys())
    except Exception:
        # If we can't get tool names, proceed without filtering
        pass

    # Filter MCP server toolsets to remove conflicting tools
    filtered_mcp_servers = []
    if mcp_servers and existing_tool_names:
        for mcp_server in mcp_servers:
            try:
                server_tools = getattr(mcp_server, "tools", None)
                if server_tools:
                    # Keep only tools that don't shadow a built-in tool name
                    filtered_tools = {}
                    for tool_name, tool_func in server_tools.items():
                        if tool_name not in existing_tool_names:
                            filtered_tools[tool_name] = tool_func

                    if filtered_tools:
                        # Create a new toolset with the surviving tools
                        from pydantic_ai.tools import ToolSet

                        filtered_toolset = ToolSet()
                        for tool_name, tool_func in filtered_tools.items():
                            filtered_toolset._tools[tool_name] = tool_func
                        filtered_mcp_servers.append(filtered_toolset)
                    # else: no tools left after filtering, skip this server
                else:
                    # Can't get tools from this server, include as-is
                    filtered_mcp_servers.append(mcp_server)
            except Exception:
                # Error processing this server, include as-is to be safe
                filtered_mcp_servers.append(mcp_server)
    else:
        # No filtering needed or possible
        filtered_mcp_servers = mcp_servers if mcp_servers else []

    if len(filtered_mcp_servers) != len(mcp_servers):
        emit_info(
            Text.from_markup(
                f"[dim]Filtered {len(mcp_servers) - len(filtered_mcp_servers)} conflicting MCP tools[/dim]"
            )
        )

    self._last_model_name = resolved_model_name
    global _reload_count
    _reload_count += 1
    if get_use_dbos():
        # Don't pass MCP servers to the agent constructor when using DBOS.
        # This prevents the "cannot pickle async_generator object" error;
        # MCP servers will be handled separately in run_with_mcp.
        agent_without_mcp = PydanticAgent(
            model=model,
            instructions=instructions,
            output_type=str,
            retries=3,
            toolsets=[],  # Don't include MCP servers here
            history_processors=[self.message_history_accumulator],
            model_settings=model_settings,
        )

        # Register regular tools (non-MCP) on the new agent
        agent_tools = self.get_available_tools()
        register_tools_for_agent(agent_without_mcp, agent_tools)

        # Wrap with DBOS - pass event_stream_handler at construction time
        # so DBOSModel gets the handler for streaming output
        dbos_agent = DBOSAgent(
            agent_without_mcp,
            name=f"{self.name}-{_reload_count}",
            event_stream_handler=event_stream_handler,
        )
        self.pydantic_agent = dbos_agent
        self._code_generation_agent = dbos_agent

        # Store filtered MCP servers separately for runtime use
        self._mcp_servers = filtered_mcp_servers
    else:
        # Normal path without DBOS - include filtered MCP servers in the agent
        p_agent = PydanticAgent(
            model=model,
            instructions=instructions,
            output_type=str,
            retries=3,
            toolsets=filtered_mcp_servers,
            history_processors=[self.message_history_accumulator],
            model_settings=model_settings,
        )
        # Register regular tools on the agent
        agent_tools = self.get_available_tools()
        register_tools_for_agent(p_agent, agent_tools)

        self.pydantic_agent = p_agent
        self._code_generation_agent = p_agent
        # BUG FIX: a stray `self._mcp_servers = mcp_servers` used to follow
        # this assignment, clobbering the filtered list with the unfiltered
        # one and reintroducing the tool-name conflicts filtered out above.
        self._mcp_servers = filtered_mcp_servers
    return self._code_generation_agent
|
1274
|
+
def _create_agent_with_output_type(self, output_type: Type[Any]) -> PydanticAgent:
    """Build a throwaway agent that returns a custom structured output type.

    Used by run_with_mcp when structured output is requested: the agent is
    constructed with the same model, instructions, tools, and settings as
    the main agent, but with ``output_type`` in place of ``str``.

    Args:
        output_type: The Pydantic model or type for structured output.

    Returns:
        A configured PydanticAgent, wrapped in DBOSAgent when DBOS is enabled.
    """
    from code_puppy.model_utils import prepare_prompt_for_model
    from code_puppy.tools import register_tools_for_agent

    model_name = self.get_model_name()
    models_config = ModelFactory.load_config()
    model, resolved_model_name = self._load_model_with_fallback(
        model_name, models_config, str(uuid.uuid4())
    )

    # Assemble instructions: system prompt plus optional puppy rules.
    instructions = self.get_system_prompt()
    puppy_rules = self.load_puppy_rules()
    if puppy_rules:
        instructions += f"\n{puppy_rules}"

    mcp_servers = getattr(self, "_mcp_servers", []) or []
    model_settings = make_model_settings(resolved_model_name)

    # Some models need prompt massaging; only the instructions are reused here.
    prepared = prepare_prompt_for_model(
        model_name, instructions, "", prepend_system_to_user=False
    )
    instructions = prepared.instructions

    global _reload_count
    _reload_count += 1

    use_dbos = get_use_dbos()

    # Under DBOS the MCP toolsets are attached at run time instead of at
    # construction, so the inner agent is built with an empty toolset list.
    structured_agent = PydanticAgent(
        model=model,
        instructions=instructions,
        output_type=output_type,
        retries=3,
        toolsets=[] if use_dbos else mcp_servers,
        history_processors=[self.message_history_accumulator],
        model_settings=model_settings,
    )
    register_tools_for_agent(structured_agent, self.get_available_tools())

    if not use_dbos:
        return structured_agent

    # Pass event_stream_handler at construction time for streaming output
    return DBOSAgent(
        structured_agent,
        name=f"{self.name}-structured-{_reload_count}",
        event_stream_handler=event_stream_handler,
    )
|
|
1345
|
+
# It's okay to decorate it with DBOS.step even if not using DBOS; the decorator is a no-op in that case.
@DBOS.step()
def message_history_accumulator(self, ctx: RunContext, messages: List[Any]):
    """Merge newly produced messages into the persistent message history.

    Incoming messages are deduplicated (by hash) against both the existing
    history and previously compacted messages, the main history processor is
    run to enforce context limits, and messages whose only part is an empty
    ThinkingPart are dropped.

    Args:
        ctx: The active run context, forwarded to the history processor.
        messages: Messages produced by the current model exchange.

    Returns:
        The updated message history after trimming and filtering.
    """
    _message_history = self.get_message_history()
    # Hash each message exactly once; the previous version recomputed
    # hash_message twice per incoming message and re-fetched the compacted
    # hash set on every iteration.
    existing_hashes = {self.hash_message(m) for m in _message_history}
    compacted_hashes = self.get_compacted_message_hashes()
    for msg in messages:
        msg_hash = self.hash_message(msg)
        if msg_hash not in existing_hashes and msg_hash not in compacted_hashes:
            _message_history.append(msg)

    # Apply message history trimming using the main processor
    # This ensures we maintain global state while still managing context limits
    self.message_history_processor(ctx, _message_history)

    # Drop messages consisting solely of an empty ThinkingPart — some models
    # emit these and they add nothing to the conversation record.
    filtered_history = [
        msg
        for msg in self.get_message_history()
        if not (
            len(msg.parts) == 1
            and isinstance(msg.parts[0], ThinkingPart)
            and msg.parts[0].content == ""
        )
    ]
    self.set_message_history(filtered_history)
    return self.get_message_history()
|
|
1370
|
+
def _spawn_ctrl_x_key_listener(
    self,
    stop_event: threading.Event,
    on_escape: Callable[[], None],
    on_cancel_agent: Optional[Callable[[], None]] = None,
) -> Optional[threading.Thread]:
    """Launch a daemon thread that watches the terminal for hotkeys.

    The thread reacts to Ctrl+X (shell command cancel) and, when SIGINT-based
    cancellation is disabled, to the configured cancel_agent_key. Nothing is
    started unless stdin is an interactive TTY.

    Args:
        stop_event: Set this to ask the listener thread to exit.
        on_escape: Invoked on Ctrl+X.
        on_cancel_agent: Invoked on the cancel_agent_key (only consulted when
            cancel_agent_uses_signal() is False).

    Returns:
        The started listener thread, or None when no listener is needed.
    """
    try:
        import sys
    except ImportError:
        return None

    # Only attach to a real interactive terminal; anything else (pipes,
    # redirected stdin, exotic stream objects) gets no listener.
    stdin = getattr(sys, "stdin", None)
    if stdin is None or not hasattr(stdin, "isatty"):
        return None
    try:
        if not stdin.isatty():
            return None
    except Exception:
        return None

    def _pump_keys() -> None:
        # Dispatch to the platform-appropriate polling loop; any crash is
        # reported but must never take down the process.
        try:
            if sys.platform.startswith("win"):
                self._listen_for_ctrl_x_windows(
                    stop_event, on_escape, on_cancel_agent
                )
            else:
                self._listen_for_ctrl_x_posix(
                    stop_event, on_escape, on_cancel_agent
                )
        except Exception:
            emit_warning(
                "Key listener stopped unexpectedly; press Ctrl+C to cancel."
            )

    worker = threading.Thread(
        target=_pump_keys, name="code-puppy-key-listener", daemon=True
    )
    worker.start()
    return worker
|
|
1421
|
+
|
|
1422
|
+
def _listen_for_ctrl_x_windows(
    self,
    stop_event: threading.Event,
    on_escape: Callable[[], None],
    on_cancel_agent: Optional[Callable[[], None]] = None,
) -> None:
    """Poll the Windows console for hotkeys until stop_event is set.

    Runs on the listener thread. Invokes on_escape for Ctrl+X and
    on_cancel_agent for the configured cancel key (keyboard-cancel mode
    only). Callback exceptions are swallowed with a warning so the loop
    keeps running; an unexpected polling error ends the loop.
    """
    import msvcrt
    import time

    # Get the cancel agent char code if we're using keyboard-based cancel
    cancel_agent_char: Optional[str] = None
    if on_cancel_agent is not None and not cancel_agent_uses_signal():
        cancel_agent_char = get_cancel_agent_char_code()

    while not stop_event.is_set():
        try:
            # Non-blocking probe: only read when a key press is pending.
            if msvcrt.kbhit():
                key = msvcrt.getwch()
                if key == "\x18":  # Ctrl+X
                    try:
                        on_escape()
                    except Exception:
                        emit_warning(
                            "Ctrl+X handler raised unexpectedly; Ctrl+C still works."
                        )
                elif (
                    cancel_agent_char
                    and on_cancel_agent
                    and key == cancel_agent_char
                ):
                    try:
                        on_cancel_agent()
                    except Exception:
                        emit_warning("Cancel agent handler raised unexpectedly.")
        except Exception:
            # Console polling itself failed; give up on this listener
            # rather than spin on a broken console handle.
            emit_warning(
                "Windows key listener error; Ctrl+C is still available for cancel."
            )
            return
        # 50 ms poll interval keeps CPU usage negligible while staying responsive.
        time.sleep(0.05)
|
1463
|
+
def _listen_for_ctrl_x_posix(
    self,
    stop_event: threading.Event,
    on_escape: Callable[[], None],
    on_cancel_agent: Optional[Callable[[], None]] = None,
) -> None:
    """Poll a POSIX terminal for hotkeys until stop_event is set.

    Runs on the listener thread. Puts the TTY into cbreak mode so single
    keystrokes are readable without Enter, invokes on_escape for Ctrl+X and
    on_cancel_agent for the configured cancel key (keyboard-cancel mode
    only), and always restores the original terminal attributes on exit.
    Returns silently if the terminal state cannot be captured.
    """
    import select
    import sys
    import termios
    import tty

    # Get the cancel agent char code if we're using keyboard-based cancel
    cancel_agent_char: Optional[str] = None
    if on_cancel_agent is not None and not cancel_agent_uses_signal():
        cancel_agent_char = get_cancel_agent_char_code()

    stdin = sys.stdin
    try:
        fd = stdin.fileno()
    except (AttributeError, ValueError, OSError):
        # stdin has no usable file descriptor (e.g. replaced stream).
        return
    try:
        # Snapshot terminal attributes so they can be restored in the finally.
        original_attrs = termios.tcgetattr(fd)
    except Exception:
        return

    try:
        # cbreak: deliver characters immediately without echo-cooked line buffering.
        tty.setcbreak(fd)
        while not stop_event.is_set():
            try:
                # 50 ms timeout doubles as the stop_event poll interval.
                read_ready, _, _ = select.select([stdin], [], [], 0.05)
            except Exception:
                break
            if not read_ready:
                continue
            data = stdin.read(1)
            if not data:
                # EOF on stdin: terminal gone, stop listening.
                break
            if data == "\x18":  # Ctrl+X
                try:
                    on_escape()
                except Exception:
                    emit_warning(
                        "Ctrl+X handler raised unexpectedly; Ctrl+C still works."
                    )
            elif (
                cancel_agent_char and on_cancel_agent and data == cancel_agent_char
            ):
                try:
                    on_cancel_agent()
                except Exception:
                    emit_warning("Cancel agent handler raised unexpectedly.")
    finally:
        # Always restore the user's terminal settings, whatever happened above.
        termios.tcsetattr(fd, termios.TCSADRAIN, original_attrs)
|
|
1518
|
+
async def run_with_mcp(
|
|
1519
|
+
self,
|
|
1520
|
+
prompt: str,
|
|
1521
|
+
*,
|
|
1522
|
+
attachments: Optional[Sequence[BinaryContent]] = None,
|
|
1523
|
+
link_attachments: Optional[Sequence[Union[ImageUrl, DocumentUrl]]] = None,
|
|
1524
|
+
output_type: Optional[Type[Any]] = None,
|
|
1525
|
+
**kwargs,
|
|
1526
|
+
) -> Any:
|
|
1527
|
+
"""Run the agent with MCP servers, attachments, and full cancellation support.
|
|
1528
|
+
|
|
1529
|
+
Args:
|
|
1530
|
+
prompt: Primary user prompt text (may be empty when attachments present).
|
|
1531
|
+
attachments: Local binary payloads (e.g., dragged images) to include.
|
|
1532
|
+
link_attachments: Remote assets (image/document URLs) to include.
|
|
1533
|
+
output_type: Optional Pydantic model or type for structured output.
|
|
1534
|
+
When provided, creates a temporary agent configured to return
|
|
1535
|
+
this type instead of the default string output.
|
|
1536
|
+
**kwargs: Additional arguments forwarded to `pydantic_ai.Agent.run`.
|
|
1537
|
+
|
|
1538
|
+
Returns:
|
|
1539
|
+
The agent's response (typed according to output_type if specified).
|
|
1540
|
+
|
|
1541
|
+
Raises:
|
|
1542
|
+
asyncio.CancelledError: When execution is cancelled by user.
|
|
1543
|
+
"""
|
|
1544
|
+
# Sanitize prompt to remove invalid Unicode surrogates that can cause
|
|
1545
|
+
# encoding errors (especially common on Windows with copy-paste)
|
|
1546
|
+
if prompt:
|
|
1547
|
+
try:
|
|
1548
|
+
prompt = prompt.encode("utf-8", errors="surrogatepass").decode(
|
|
1549
|
+
"utf-8", errors="replace"
|
|
1550
|
+
)
|
|
1551
|
+
except (UnicodeEncodeError, UnicodeDecodeError):
|
|
1552
|
+
# Fallback: filter out surrogate characters directly
|
|
1553
|
+
prompt = "".join(
|
|
1554
|
+
char if ord(char) < 0xD800 or ord(char) > 0xDFFF else "\ufffd"
|
|
1555
|
+
for char in prompt
|
|
1556
|
+
)
|
|
1557
|
+
|
|
1558
|
+
group_id = str(uuid.uuid4())
|
|
1559
|
+
# Avoid double-loading: reuse existing agent if already built
|
|
1560
|
+
pydantic_agent = (
|
|
1561
|
+
self._code_generation_agent or self.reload_code_generation_agent()
|
|
1562
|
+
)
|
|
1563
|
+
|
|
1564
|
+
# If a custom output_type is specified, create a temporary agent with that type
|
|
1565
|
+
if output_type is not None:
|
|
1566
|
+
pydantic_agent = self._create_agent_with_output_type(output_type)
|
|
1567
|
+
|
|
1568
|
+
# Handle claude-code, chatgpt-codex, and antigravity models: prepend system prompt to first user message
|
|
1569
|
+
from code_puppy.model_utils import (
|
|
1570
|
+
is_antigravity_model,
|
|
1571
|
+
is_chatgpt_codex_model,
|
|
1572
|
+
is_claude_code_model,
|
|
1573
|
+
)
|
|
1574
|
+
|
|
1575
|
+
if (
|
|
1576
|
+
is_claude_code_model(self.get_model_name())
|
|
1577
|
+
or is_chatgpt_codex_model(self.get_model_name())
|
|
1578
|
+
or is_antigravity_model(self.get_model_name())
|
|
1579
|
+
):
|
|
1580
|
+
if len(self.get_message_history()) == 0:
|
|
1581
|
+
system_prompt = self.get_system_prompt()
|
|
1582
|
+
puppy_rules = self.load_puppy_rules()
|
|
1583
|
+
if puppy_rules:
|
|
1584
|
+
system_prompt += f"\n{puppy_rules}"
|
|
1585
|
+
prompt = system_prompt + "\n\n" + prompt
|
|
1586
|
+
|
|
1587
|
+
# Build combined prompt payload when attachments are provided.
|
|
1588
|
+
attachment_parts: List[Any] = []
|
|
1589
|
+
if attachments:
|
|
1590
|
+
attachment_parts.extend(list(attachments))
|
|
1591
|
+
if link_attachments:
|
|
1592
|
+
attachment_parts.extend(list(link_attachments))
|
|
1593
|
+
|
|
1594
|
+
if attachment_parts:
|
|
1595
|
+
prompt_payload: Union[str, List[Any]] = []
|
|
1596
|
+
if prompt:
|
|
1597
|
+
prompt_payload.append(prompt)
|
|
1598
|
+
prompt_payload.extend(attachment_parts)
|
|
1599
|
+
else:
|
|
1600
|
+
prompt_payload = prompt
|
|
1601
|
+
|
|
1602
|
+
async def run_agent_task():
|
|
1603
|
+
try:
|
|
1604
|
+
self.set_message_history(
|
|
1605
|
+
self.prune_interrupted_tool_calls(self.get_message_history())
|
|
1606
|
+
)
|
|
1607
|
+
|
|
1608
|
+
# DELAYED COMPACTION: Check if we should attempt delayed compaction
|
|
1609
|
+
if self.should_attempt_delayed_compaction():
|
|
1610
|
+
emit_info(
|
|
1611
|
+
"🔄 Attempting delayed compaction (tool calls completed)",
|
|
1612
|
+
message_group="token_context_status",
|
|
1613
|
+
)
|
|
1614
|
+
current_messages = self.get_message_history()
|
|
1615
|
+
compacted_messages, _ = self.compact_messages(current_messages)
|
|
1616
|
+
if compacted_messages != current_messages:
|
|
1617
|
+
self.set_message_history(compacted_messages)
|
|
1618
|
+
emit_info(
|
|
1619
|
+
"✅ Delayed compaction completed successfully",
|
|
1620
|
+
message_group="token_context_status",
|
|
1621
|
+
)
|
|
1622
|
+
|
|
1623
|
+
usage_limits = UsageLimits(request_limit=get_message_limit())
|
|
1624
|
+
|
|
1625
|
+
# Handle MCP servers - add them temporarily when using DBOS
|
|
1626
|
+
if (
|
|
1627
|
+
get_use_dbos()
|
|
1628
|
+
and hasattr(self, "_mcp_servers")
|
|
1629
|
+
and self._mcp_servers
|
|
1630
|
+
):
|
|
1631
|
+
# Temporarily add MCP servers to the DBOS agent using internal _toolsets
|
|
1632
|
+
original_toolsets = pydantic_agent._toolsets
|
|
1633
|
+
pydantic_agent._toolsets = original_toolsets + self._mcp_servers
|
|
1634
|
+
pydantic_agent._toolsets = original_toolsets + self._mcp_servers
|
|
1635
|
+
|
|
1636
|
+
try:
|
|
1637
|
+
# Set the workflow ID for DBOS context so DBOS and Code Puppy ID match
|
|
1638
|
+
with SetWorkflowID(group_id):
|
|
1639
|
+
result_ = await pydantic_agent.run(
|
|
1640
|
+
prompt_payload,
|
|
1641
|
+
message_history=self.get_message_history(),
|
|
1642
|
+
usage_limits=usage_limits,
|
|
1643
|
+
event_stream_handler=event_stream_handler,
|
|
1644
|
+
**kwargs,
|
|
1645
|
+
)
|
|
1646
|
+
return result_
|
|
1647
|
+
finally:
|
|
1648
|
+
# Always restore original toolsets
|
|
1649
|
+
pydantic_agent._toolsets = original_toolsets
|
|
1650
|
+
elif get_use_dbos():
|
|
1651
|
+
with SetWorkflowID(group_id):
|
|
1652
|
+
result_ = await pydantic_agent.run(
|
|
1653
|
+
prompt_payload,
|
|
1654
|
+
message_history=self.get_message_history(),
|
|
1655
|
+
usage_limits=usage_limits,
|
|
1656
|
+
event_stream_handler=event_stream_handler,
|
|
1657
|
+
**kwargs,
|
|
1658
|
+
)
|
|
1659
|
+
return result_
|
|
1660
|
+
else:
|
|
1661
|
+
# Non-DBOS path (MCP servers are already included)
|
|
1662
|
+
result_ = await pydantic_agent.run(
|
|
1663
|
+
prompt_payload,
|
|
1664
|
+
message_history=self.get_message_history(),
|
|
1665
|
+
usage_limits=usage_limits,
|
|
1666
|
+
event_stream_handler=event_stream_handler,
|
|
1667
|
+
**kwargs,
|
|
1668
|
+
)
|
|
1669
|
+
return result_
|
|
1670
|
+
except* UsageLimitExceeded as ule:
|
|
1671
|
+
emit_info(f"Usage limit exceeded: {str(ule)}", group_id=group_id)
|
|
1672
|
+
emit_info(
|
|
1673
|
+
"The agent has reached its usage limit. You can ask it to continue by saying 'please continue' or similar.",
|
|
1674
|
+
group_id=group_id,
|
|
1675
|
+
)
|
|
1676
|
+
except* mcp.shared.exceptions.McpError as mcp_error:
|
|
1677
|
+
emit_info(f"MCP server error: {str(mcp_error)}", group_id=group_id)
|
|
1678
|
+
emit_info(f"{str(mcp_error)}", group_id=group_id)
|
|
1679
|
+
emit_info(
|
|
1680
|
+
"Try disabling any malfunctioning MCP servers", group_id=group_id
|
|
1681
|
+
)
|
|
1682
|
+
except* asyncio.exceptions.CancelledError:
|
|
1683
|
+
emit_info("Cancelled")
|
|
1684
|
+
if get_use_dbos():
|
|
1685
|
+
await DBOS.cancel_workflow_async(group_id)
|
|
1686
|
+
except* InterruptedError as ie:
|
|
1687
|
+
emit_info(f"Interrupted: {str(ie)}")
|
|
1688
|
+
if get_use_dbos():
|
|
1689
|
+
await DBOS.cancel_workflow_async(group_id)
|
|
1690
|
+
except* Exception as other_error:
|
|
1691
|
+
# Filter out CancelledError and UsageLimitExceeded from the exception group - let it propagate
|
|
1692
|
+
remaining_exceptions = []
|
|
1693
|
+
|
|
1694
|
+
def collect_non_cancelled_exceptions(exc):
|
|
1695
|
+
if isinstance(exc, ExceptionGroup):
|
|
1696
|
+
for sub_exc in exc.exceptions:
|
|
1697
|
+
collect_non_cancelled_exceptions(sub_exc)
|
|
1698
|
+
elif not isinstance(
|
|
1699
|
+
exc, (asyncio.CancelledError, UsageLimitExceeded)
|
|
1700
|
+
):
|
|
1701
|
+
remaining_exceptions.append(exc)
|
|
1702
|
+
emit_info(f"Unexpected error: {str(exc)}", group_id=group_id)
|
|
1703
|
+
emit_info(f"{str(exc.args)}", group_id=group_id)
|
|
1704
|
+
# Log to file for debugging
|
|
1705
|
+
log_error(
|
|
1706
|
+
exc,
|
|
1707
|
+
context=f"Agent run (group_id={group_id})",
|
|
1708
|
+
include_traceback=True,
|
|
1709
|
+
)
|
|
1710
|
+
|
|
1711
|
+
collect_non_cancelled_exceptions(other_error)
|
|
1712
|
+
|
|
1713
|
+
# If there are CancelledError exceptions in the group, re-raise them
|
|
1714
|
+
cancelled_exceptions = []
|
|
1715
|
+
|
|
1716
|
+
def collect_cancelled_exceptions(exc):
|
|
1717
|
+
if isinstance(exc, ExceptionGroup):
|
|
1718
|
+
for sub_exc in exc.exceptions:
|
|
1719
|
+
collect_cancelled_exceptions(sub_exc)
|
|
1720
|
+
elif isinstance(exc, asyncio.CancelledError):
|
|
1721
|
+
cancelled_exceptions.append(exc)
|
|
1722
|
+
|
|
1723
|
+
collect_cancelled_exceptions(other_error)
|
|
1724
|
+
finally:
|
|
1725
|
+
self.set_message_history(
|
|
1726
|
+
self.prune_interrupted_tool_calls(self.get_message_history())
|
|
1727
|
+
)
|
|
1728
|
+
|
|
1729
|
+
# Create the task FIRST
|
|
1730
|
+
agent_task = asyncio.create_task(run_agent_task())
|
|
1731
|
+
|
|
1732
|
+
# Import shell process status helper
|
|
1733
|
+
|
|
1734
|
+
loop = asyncio.get_running_loop()
|
|
1735
|
+
|
|
1736
|
+
def schedule_agent_cancel() -> None:
|
|
1737
|
+
from code_puppy.tools.command_runner import _RUNNING_PROCESSES
|
|
1738
|
+
|
|
1739
|
+
if len(_RUNNING_PROCESSES):
|
|
1740
|
+
emit_warning(
|
|
1741
|
+
"Refusing to cancel Agent while a shell command is currently running - press Ctrl+X to cancel the shell command."
|
|
1742
|
+
)
|
|
1743
|
+
return
|
|
1744
|
+
if agent_task.done():
|
|
1745
|
+
return
|
|
1746
|
+
|
|
1747
|
+
# Cancel all active subagent tasks
|
|
1748
|
+
if _active_subagent_tasks:
|
|
1749
|
+
emit_warning(
|
|
1750
|
+
f"Cancelling {len(_active_subagent_tasks)} active subagent task(s)..."
|
|
1751
|
+
)
|
|
1752
|
+
for task in list(
|
|
1753
|
+
_active_subagent_tasks
|
|
1754
|
+
): # Create a copy since we'll be modifying the set
|
|
1755
|
+
if not task.done():
|
|
1756
|
+
loop.call_soon_threadsafe(task.cancel)
|
|
1757
|
+
loop.call_soon_threadsafe(agent_task.cancel)
|
|
1758
|
+
|
|
1759
|
+
def keyboard_interrupt_handler(_sig, _frame):
|
|
1760
|
+
# If we're awaiting user input (e.g., file permission prompt),
|
|
1761
|
+
# don't cancel the agent - let the input() call handle the interrupt naturally
|
|
1762
|
+
if is_awaiting_user_input():
|
|
1763
|
+
# Don't do anything here - let the input() call raise KeyboardInterrupt naturally
|
|
1764
|
+
return
|
|
1765
|
+
|
|
1766
|
+
schedule_agent_cancel()
|
|
1767
|
+
|
|
1768
|
+
def graceful_sigint_handler(_sig, _frame):
|
|
1769
|
+
# When using keyboard-based cancel, SIGINT should be a no-op
|
|
1770
|
+
# (just show a hint to user about the configured cancel key)
|
|
1771
|
+
# Also reset terminal to prevent bricking on Windows+uvx
|
|
1772
|
+
from code_puppy.keymap import get_cancel_agent_display_name
|
|
1773
|
+
from code_puppy.terminal_utils import reset_windows_terminal_full
|
|
1774
|
+
|
|
1775
|
+
# Reset terminal state first to prevent bricking
|
|
1776
|
+
reset_windows_terminal_full()
|
|
1777
|
+
|
|
1778
|
+
cancel_key = get_cancel_agent_display_name()
|
|
1779
|
+
emit_info(f"Use {cancel_key} to cancel the agent task.")
|
|
1780
|
+
|
|
1781
|
+
original_handler = None
|
|
1782
|
+
key_listener_stop_event = None
|
|
1783
|
+
_key_listener_thread = None
|
|
1784
|
+
|
|
1785
|
+
try:
|
|
1786
|
+
if cancel_agent_uses_signal():
|
|
1787
|
+
# Use SIGINT-based cancellation (default Ctrl+C behavior)
|
|
1788
|
+
original_handler = signal.signal(
|
|
1789
|
+
signal.SIGINT, keyboard_interrupt_handler
|
|
1790
|
+
)
|
|
1791
|
+
else:
|
|
1792
|
+
# Use keyboard listener for agent cancellation
|
|
1793
|
+
# Set a graceful SIGINT handler that shows a hint
|
|
1794
|
+
original_handler = signal.signal(signal.SIGINT, graceful_sigint_handler)
|
|
1795
|
+
# Spawn keyboard listener with the cancel agent callback
|
|
1796
|
+
key_listener_stop_event = threading.Event()
|
|
1797
|
+
_key_listener_thread = self._spawn_ctrl_x_key_listener(
|
|
1798
|
+
key_listener_stop_event,
|
|
1799
|
+
on_escape=lambda: None, # Ctrl+X handled by command_runner
|
|
1800
|
+
on_cancel_agent=schedule_agent_cancel,
|
|
1801
|
+
)
|
|
1802
|
+
|
|
1803
|
+
# Wait for the task to complete or be cancelled
|
|
1804
|
+
result = await agent_task
|
|
1805
|
+
|
|
1806
|
+
# Update MCP tool cache after successful run for accurate token estimation
|
|
1807
|
+
if hasattr(self, "_mcp_servers") and self._mcp_servers:
|
|
1808
|
+
try:
|
|
1809
|
+
await self._update_mcp_tool_cache()
|
|
1810
|
+
except Exception:
|
|
1811
|
+
pass # Don't fail the run if cache update fails
|
|
1812
|
+
|
|
1813
|
+
return result
|
|
1814
|
+
except asyncio.CancelledError:
|
|
1815
|
+
agent_task.cancel()
|
|
1816
|
+
except KeyboardInterrupt:
|
|
1817
|
+
# Handle direct keyboard interrupt during await
|
|
1818
|
+
if not agent_task.done():
|
|
1819
|
+
agent_task.cancel()
|
|
1820
|
+
finally:
|
|
1821
|
+
# Stop keyboard listener if it was started
|
|
1822
|
+
if key_listener_stop_event is not None:
|
|
1823
|
+
key_listener_stop_event.set()
|
|
1824
|
+
# Restore original signal handler
|
|
1825
|
+
if (
|
|
1826
|
+
original_handler is not None
|
|
1827
|
+
): # Explicit None check - SIG_DFL can be 0/falsy!
|
|
1828
|
+
signal.signal(signal.SIGINT, original_handler)
|