code-puppy 0.0.172__tar.gz → 0.0.173__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- {code_puppy-0.0.172 → code_puppy-0.0.173}/PKG-INFO +1 -1
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agent.py +5 -5
- code_puppy-0.0.173/code_puppy/agents/base_agent.py +512 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/add_command.py +1 -1
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/install_command.py +1 -1
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/config.py +0 -1
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/main.py +2 -1
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/message_history_processor.py +87 -165
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/messaging/message_queue.py +4 -4
- code_puppy-0.0.173/code_puppy/state_management.py +58 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/command_runner.py +1 -1
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/app.py +1 -1
- code_puppy-0.0.173/code_puppy/tui_state.py +55 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/pyproject.toml +1 -1
- code_puppy-0.0.172/code_puppy/agents/base_agent.py +0 -125
- code_puppy-0.0.172/code_puppy/state_management.py +0 -159
- {code_puppy-0.0.172 → code_puppy-0.0.173}/.gitignore +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/LICENSE +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/README.md +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/__main__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agents/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agents/agent_code_puppy.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agents/agent_creator_agent.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agents/agent_manager.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agents/agent_orchestrator.json +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agents/agent_qa_kitten.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agents/json_agent.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agents/runtime_manager.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/callbacks.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/command_handler.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/file_path_completion.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/load_context_completion.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/base.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/handler.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/help_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/list_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/logs_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/remove_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/restart_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/search_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/start_all_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/start_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/status_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/stop_all_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/stop_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/test_command.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/utils.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/wizard_utils.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/meta_command_handler.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/model_picker_completion.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/motd.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/prompt_toolkit_completion.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/utils.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/http_utils.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/async_lifecycle.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/blocking_startup.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/captured_stdio_server.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/circuit_breaker.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/config_wizard.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/dashboard.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/error_isolation.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/examples/retry_example.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/health_monitor.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/managed_server.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/manager.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/registry.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/retry_manager.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/server_registry_catalog.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/status_tracker.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/mcp_/system_tools.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/messaging/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/messaging/queue_console.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/messaging/renderers.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/messaging/spinner/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/messaging/spinner/console_spinner.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/messaging/spinner/spinner_base.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/messaging/spinner/textual_spinner.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/model_factory.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/models.json +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/plugins/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/reopenable_async_client.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/round_robin_model.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/status_display.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/summarization_agent.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/agent_tools.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/browser_control.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/browser_interactions.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/browser_locators.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/browser_navigation.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/browser_screenshot.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/browser_scripts.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/browser_workflows.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/camoufox_manager.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser/vqa_agent.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser_control.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser_interactions.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser_locators.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser_navigation.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser_screenshot.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser_scripts.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/browser_workflows.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/camoufox_manager.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/common.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/file_modifications.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/file_operations.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tools/tools_content.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/components/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/components/chat_view.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/components/command_history_modal.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/components/copy_button.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/components/custom_widgets.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/components/human_input_modal.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/components/input_area.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/components/sidebar.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/components/status_bar.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/messages.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/models/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/models/chat_message.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/models/command_history.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/models/enums.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/screens/__init__.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/screens/help.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/screens/mcp_install_wizard.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/screens/settings.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/tui/screens/tools.py +0 -0
- {code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/version_checker.py +0 -0
{code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/agent.py

@@ -7,10 +7,7 @@ from pydantic_ai import Agent
 from pydantic_ai.settings import ModelSettings
 from pydantic_ai.usage import UsageLimits
 
-from code_puppy.message_history_processor import (
-    get_model_context_length,
-    message_history_accumulator,
-)
+from code_puppy.message_history_processor import message_history_accumulator
 from code_puppy.messaging.message_queue import (
     emit_error,
     emit_info,
@@ -167,7 +164,10 @@ def reload_code_generation_agent(message_group: str | None):
 
     # Configure model settings with max_tokens if set
     model_settings_dict = {"seed": 42}
-
+    # Get current agent to use its method
+    from code_puppy.agents import get_current_agent_config
+    current_agent = get_current_agent_config()
+    output_tokens = max(2048, min(int(0.05 * current_agent.get_model_context_length()) - 1024, 16384))
     console.print(f"Max output tokens per message: {output_tokens}")
     model_settings_dict["max_tokens"] = output_tokens
 
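For context, the max_tokens calculation added above clamps the per-response output budget to 5% of the model's context window minus a 1024-token reserve, bounded between 2,048 and 16,384 tokens. A minimal sketch of that arithmetic follows; the constants come from the diff, while the helper name and example context lengths are illustrative only:

def compute_max_output_tokens(context_length: int) -> int:
    # Mirrors the clamp in reload_code_generation_agent:
    # 5% of the context window, minus a 1024-token reserve, bounded to [2048, 16384].
    return max(2048, min(int(0.05 * context_length) - 1024, 16384))

# Illustrative values (hypothetical context lengths):
print(compute_max_output_tokens(128_000))  # 5376
print(compute_max_output_tokens(32_000))   # 2048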
code_puppy-0.0.173/code_puppy/agents/base_agent.py (new file)

@@ -0,0 +1,512 @@
+"""Base agent configuration class for defining agent properties."""
+
+import json
+import queue
+import uuid
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Optional, Set, Tuple
+
+import pydantic
+from pydantic_ai.messages import (
+    ModelMessage,
+    ModelRequest,
+    TextPart,
+    ToolCallPart,
+    ToolCallPartDelta,
+    ToolReturn,
+    ToolReturnPart,
+)
+
+
+class BaseAgent(ABC):
+    """Base class for all agent configurations."""
+
+    def __init__(self):
+        self.id = str(uuid.uuid4())
+        self._message_history: List[Any] = []
+        self._compacted_message_hashes: Set[str] = set()
+
+    @property
+    @abstractmethod
+    def name(self) -> str:
+        """Unique identifier for the agent."""
+        pass
+
+    @property
+    @abstractmethod
+    def display_name(self) -> str:
+        """Human-readable name for the agent."""
+        pass
+
+    @property
+    @abstractmethod
+    def description(self) -> str:
+        """Brief description of what this agent does."""
+        pass
+
+    @abstractmethod
+    def get_system_prompt(self) -> str:
+        """Get the system prompt for this agent."""
+        pass
+
+    @abstractmethod
+    def get_available_tools(self) -> List[str]:
+        """Get list of tool names that this agent should have access to.
+
+        Returns:
+            List of tool names to register for this agent.
+        """
+        pass
+
+    def get_tools_config(self) -> Optional[Dict[str, Any]]:
+        """Get tool configuration for this agent.
+
+        Returns:
+            Dict with tool configuration, or None to use default tools.
+        """
+        return None
+
+    def get_user_prompt(self) -> Optional[str]:
+        """Get custom user prompt for this agent.
+
+        Returns:
+            Custom prompt string, or None to use default.
+        """
+        return None
+
+    # Message history management methods
+    def get_message_history(self) -> List[Any]:
+        """Get the message history for this agent.
+
+        Returns:
+            List of messages in this agent's conversation history.
+        """
+        return self._message_history
+
+    def set_message_history(self, history: List[Any]) -> None:
+        """Set the message history for this agent.
+
+        Args:
+            history: List of messages to set as the conversation history.
+        """
+        self._message_history = history
+
+    def clear_message_history(self) -> None:
+        """Clear the message history for this agent."""
+        self._message_history = []
+        self._compacted_message_hashes.clear()
+
+    def append_to_message_history(self, message: Any) -> None:
+        """Append a message to this agent's history.
+
+        Args:
+            message: Message to append to the conversation history.
+        """
+        self._message_history.append(message)
+
+    def extend_message_history(self, history: List[Any]) -> None:
+        """Extend this agent's message history with multiple messages.
+
+        Args:
+            history: List of messages to append to the conversation history.
+        """
+        self._message_history.extend(history)
+
+    def get_compacted_message_hashes(self) -> Set[str]:
+        """Get the set of compacted message hashes for this agent.
+
+        Returns:
+            Set of hashes for messages that have been compacted/summarized.
+        """
+        return self._compacted_message_hashes
+
+    def add_compacted_message_hash(self, message_hash: str) -> None:
+        """Add a message hash to the set of compacted message hashes.
+
+        Args:
+            message_hash: Hash of a message that has been compacted/summarized.
+        """
+        self._compacted_message_hashes.add(message_hash)
+
+    def get_model_name(self) -> Optional[str]:
+        """Get pinned model name for this agent, if specified.
+
+        Returns:
+            Model name to use for this agent, or None to use global default.
+        """
+        from ..config import get_agent_pinned_model
+        return get_agent_pinned_model(self.name)
+
+    # Message history processing methods (moved from state_management.py and message_history_processor.py)
+    def _stringify_part(self, part: Any) -> str:
+        """Create a stable string representation for a message part.
+
+        We deliberately ignore timestamps so identical content hashes the same even when
+        emitted at different times. This prevents status updates from blowing up the
+        history when they are repeated with new timestamps."""
+
+        attributes: List[str] = [part.__class__.__name__]
+
+        # Role/instructions help disambiguate parts that otherwise share content
+        if hasattr(part, "role") and part.role:
+            attributes.append(f"role={part.role}")
+        if hasattr(part, "instructions") and part.instructions:
+            attributes.append(f"instructions={part.instructions}")
+
+        if hasattr(part, "tool_call_id") and part.tool_call_id:
+            attributes.append(f"tool_call_id={part.tool_call_id}")
+
+        if hasattr(part, "tool_name") and part.tool_name:
+            attributes.append(f"tool_name={part.tool_name}")
+
+        content = getattr(part, "content", None)
+        if content is None:
+            attributes.append("content=None")
+        elif isinstance(content, str):
+            attributes.append(f"content={content}")
+        elif isinstance(content, pydantic.BaseModel):
+            attributes.append(f"content={json.dumps(content.model_dump(), sort_keys=True)}")
+        elif isinstance(content, dict):
+            attributes.append(f"content={json.dumps(content, sort_keys=True)}")
+        else:
+            attributes.append(f"content={repr(content)}")
+        result = "|".join(attributes)
+        return result
+
+    def hash_message(self, message: Any) -> int:
+        """Create a stable hash for a model message that ignores timestamps."""
+        role = getattr(message, "role", None)
+        instructions = getattr(message, "instructions", None)
+        header_bits: List[str] = []
+        if role:
+            header_bits.append(f"role={role}")
+        if instructions:
+            header_bits.append(f"instructions={instructions}")
+
+        part_strings = [self._stringify_part(part) for part in getattr(message, "parts", [])]
+        canonical = "||".join(header_bits + part_strings)
+        return hash(canonical)
+
+    def stringify_message_part(self, part) -> str:
+        """
+        Convert a message part to a string representation for token estimation or other uses.
+
+        Args:
+            part: A message part that may contain content or be a tool call
+
+        Returns:
+            String representation of the message part
+        """
+        result = ""
+        if hasattr(part, "part_kind"):
+            result += part.part_kind + ": "
+        else:
+            result += str(type(part)) + ": "
+
+        # Handle content
+        if hasattr(part, "content") and part.content:
+            # Handle different content types
+            if isinstance(part.content, str):
+                result = part.content
+            elif isinstance(part.content, pydantic.BaseModel):
+                result = json.dumps(part.content.model_dump())
+            elif isinstance(part.content, dict):
+                result = json.dumps(part.content)
+            else:
+                result = str(part.content)
+
+        # Handle tool calls which may have additional token costs
+        # If part also has content, we'll process tool calls separately
+        if hasattr(part, "tool_name") and part.tool_name:
+            # Estimate tokens for tool name and parameters
+            tool_text = part.tool_name
+            if hasattr(part, "args"):
+                tool_text += f" {str(part.args)}"
+            result += tool_text
+
+        return result
+
+    def estimate_tokens_for_message(self, message: ModelMessage) -> int:
+        """
+        Estimate the number of tokens in a message using len(message) - 4.
+        Simple and fast replacement for tiktoken.
+        """
+        total_tokens = 0
+
+        for part in message.parts:
+            part_str = self.stringify_message_part(part)
+            if part_str:
+                total_tokens += len(part_str)
+
+        return int(max(1, total_tokens) / 4)
+
+    def _is_tool_call_part(self, part: Any) -> bool:
+        if isinstance(part, (ToolCallPart, ToolCallPartDelta)):
+            return True
+
+        part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
+        if part_kind == "tool-call":
+            return True
+
+        has_tool_name = getattr(part, "tool_name", None) is not None
+        has_args = getattr(part, "args", None) is not None
+        has_args_delta = getattr(part, "args_delta", None) is not None
+
+        return bool(has_tool_name and (has_args or has_args_delta))
+
+    def _is_tool_return_part(self, part: Any) -> bool:
+        if isinstance(part, (ToolReturnPart, ToolReturn)):
+            return True
+
+        part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
+        if part_kind in {"tool-return", "tool-result"}:
+            return True
+
+        if getattr(part, "tool_call_id", None) is None:
+            return False
+
+        has_content = getattr(part, "content", None) is not None
+        has_content_delta = getattr(part, "content_delta", None) is not None
+        return bool(has_content or has_content_delta)
+
+    def filter_huge_messages(self, messages: List[ModelMessage]) -> List[ModelMessage]:
+        if not messages:
+            return []
+
+        # Never drop the system prompt, even if it is extremely large.
+        system_message, *rest = messages
+        filtered_rest = [
+            m for m in rest if self.estimate_tokens_for_message(m) < 50000
+        ]
+        return [system_message] + filtered_rest
+
+    def split_messages_for_protected_summarization(
+        self,
+        messages: List[ModelMessage],
+    ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
+        """
+        Split messages into two groups: messages to summarize and protected recent messages.
+
+        Returns:
+            Tuple of (messages_to_summarize, protected_messages)
+
+        The protected_messages are the most recent messages that total up to the configured protected token count.
+        The system message (first message) is always protected.
+        All other messages that don't fit in the protected zone will be summarized.
+        """
+        if len(messages) <= 1:  # Just system message or empty
+            return [], messages
+
+        # Always protect the system message (first message)
+        system_message = messages[0]
+        system_tokens = self.estimate_tokens_for_message(system_message)
+
+        if len(messages) == 1:
+            return [], messages
+
+        # Get the configured protected token count
+        from ..config import get_protected_token_count
+        protected_tokens_limit = get_protected_token_count()
+
+        # Calculate tokens for messages from most recent backwards (excluding system message)
+        protected_messages = []
+        protected_token_count = system_tokens  # Start with system message tokens
+
+        # Go backwards through non-system messages to find protected zone
+        for i in range(len(messages) - 1, 0, -1):  # Stop at 1, not 0 (skip system message)
+            message = messages[i]
+            message_tokens = self.estimate_tokens_for_message(message)
+
+            # If adding this message would exceed protected tokens, stop here
+            if protected_token_count + message_tokens > protected_tokens_limit:
+                break
+
+            protected_messages.append(message)
+            protected_token_count += message_tokens
+
+        # Messages that were added while scanning backwards are currently in reverse order.
+        # Reverse them to restore chronological ordering, then prepend the system prompt.
+        protected_messages.reverse()
+        protected_messages.insert(0, system_message)
+
+        # Messages to summarize are everything between the system message and the
+        # protected tail zone we just constructed.
+        protected_start_idx = max(1, len(messages) - (len(protected_messages) - 1))
+        messages_to_summarize = messages[1:protected_start_idx]
+
+        # Emit info messages
+        from ..messaging import emit_info
+        emit_info(
+            f"🔒 Protecting {len(protected_messages)} recent messages ({protected_token_count} tokens, limit: {protected_tokens_limit})"
+        )
+        emit_info(f"📝 Summarizing {len(messages_to_summarize)} older messages")
+
+        return messages_to_summarize, protected_messages
+
+    def summarize_messages(
+        self,
+        messages: List[ModelMessage],
+        with_protection: bool = True
+    ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
+        """
+        Summarize messages while protecting recent messages up to PROTECTED_TOKENS.
+
+        Returns:
+            Tuple of (compacted_messages, summarized_source_messages)
+            where compacted_messages always preserves the original system message
+            as the first entry.
+        """
+        messages_to_summarize: List[ModelMessage]
+        protected_messages: List[ModelMessage]
+
+        if with_protection:
+            messages_to_summarize, protected_messages = (
+                self.split_messages_for_protected_summarization(messages)
+            )
+        else:
+            messages_to_summarize = messages[1:] if messages else []
+            protected_messages = messages[:1]
+
+        if not messages:
+            return [], []
+
+        system_message = messages[0]
+
+        if not messages_to_summarize:
+            # Nothing to summarize, so just return the original sequence
+            return self.prune_interrupted_tool_calls(messages), []
+
+        instructions = (
+            "The input will be a log of Agentic AI steps that have been taken"
+            " as well as user queries, etc. Summarize the contents of these steps."
+            " The high level details should remain but the bulk of the content from tool-call"
+            " responses should be compacted and summarized. For example if you see a tool-call"
+            " reading a file, and the file contents are large, then in your summary you might just"
+            " write: * used read_file on space_invaders.cpp - contents removed."
+            "\n Make sure your result is a bulleted list of all steps and interactions."
+            "\n\nNOTE: This summary represents older conversation history. Recent messages are preserved separately."
+        )
+
+        try:
+            from ..summarization_agent import run_summarization_sync
+            new_messages = run_summarization_sync(
+                instructions, message_history=messages_to_summarize
+            )
+
+            if not isinstance(new_messages, list):
+                from ..messaging import emit_warning
+                emit_warning(
+                    "Summarization agent returned non-list output; wrapping into message request"
+                )
+                new_messages = [ModelRequest([TextPart(str(new_messages))])]
+
+            compacted: List[ModelMessage] = [system_message] + list(new_messages)
+
+            # Drop the system message from protected_messages because we already included it
+            protected_tail = [msg for msg in protected_messages if msg is not system_message]
+
+            compacted.extend(protected_tail)
+
+            return self.prune_interrupted_tool_calls(compacted), messages_to_summarize
+        except Exception as e:
+            from ..messaging import emit_error
+            emit_error(f"Summarization failed during compaction: {e}")
+            return messages, []  # Return original messages on failure
+
+    def summarize_message(self, message: ModelMessage) -> ModelMessage:
+        try:
+            # If the message looks like a system/instructions message, skip summarization
+            instructions = getattr(message, "instructions", None)
+            if instructions:
+                return message
+            # If any part is a tool call, skip summarization
+            for part in message.parts:
+                if isinstance(part, ToolCallPart) or getattr(part, "tool_name", None):
+                    return message
+            # Build prompt from textual content parts
+            content_bits: List[str] = []
+            for part in message.parts:
+                s = self.stringify_message_part(part)
+                if s:
+                    content_bits.append(s)
+            if not content_bits:
+                return message
+            prompt = "Please summarize the following user message:\n" + "\n".join(
+                content_bits
+            )
+
+            from ..summarization_agent import run_summarization_sync
+            output_text = run_summarization_sync(prompt)
+            summarized = ModelRequest([TextPart(output_text)])
+            return summarized
+        except Exception as e:
+            from ..messaging import emit_error
+            emit_error(f"Summarization failed: {e}")
+            return message
+
+    def get_model_context_length(self) -> int:
+        """
+        Get the context length for the currently configured model from models.json
+        """
+        from ..config import get_model_name
+        from ..model_factory import ModelFactory
+
+        model_configs = ModelFactory.load_config()
+        model_name = get_model_name()
+
+        # Get context length from model config
+        model_config = model_configs.get(model_name, {})
+        context_length = model_config.get("context_length", 128000)  # Default value
+
+        return int(context_length)
+
+    def prune_interrupted_tool_calls(self, messages: List[ModelMessage]) -> List[ModelMessage]:
+        """
+        Remove any messages that participate in mismatched tool call sequences.
+
+        A mismatched tool call id is one that appears in a ToolCall (model/tool request)
+        without a corresponding tool return, or vice versa. We preserve original order
+        and only drop messages that contain parts referencing mismatched tool_call_ids.
+        """
+        if not messages:
+            return messages
+
+        tool_call_ids: Set[str] = set()
+        tool_return_ids: Set[str] = set()
+
+        # First pass: collect ids for calls vs returns
+        for msg in messages:
+            for part in getattr(msg, "parts", []) or []:
+                tool_call_id = getattr(part, "tool_call_id", None)
+                if not tool_call_id:
+                    continue
+
+                if self._is_tool_call_part(part) and not self._is_tool_return_part(part):
+                    tool_call_ids.add(tool_call_id)
+                elif self._is_tool_return_part(part):
+                    tool_return_ids.add(tool_call_id)
+
+        mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
+        if not mismatched:
+            return messages
+
+        pruned: List[ModelMessage] = []
+        dropped_count = 0
+        for msg in messages:
+            has_mismatched = False
+            for part in getattr(msg, "parts", []) or []:
+                tcid = getattr(part, "tool_call_id", None)
+                if tcid and tcid in mismatched:
+                    has_mismatched = True
+                    break
+            if has_mismatched:
+                dropped_count += 1
+                continue
+            pruned.append(msg)
+
+        if dropped_count:
+            from ..messaging import emit_warning
+            emit_warning(
+                f"Pruned {dropped_count} message(s) with mismatched tool_call_id pairs"
+            )
+        return pruned
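To illustrate how the new BaseAgent class is meant to be subclassed, here is a minimal sketch. The abstract properties and methods are taken from the file above; the agent name, prompt text, and tool names are hypothetical placeholders rather than identifiers from the package:

from typing import List

from code_puppy.agents.base_agent import BaseAgent


class ExampleAgent(BaseAgent):
    """Hypothetical agent used only to illustrate the BaseAgent contract."""

    @property
    def name(self) -> str:
        return "example-agent"  # placeholder identifier

    @property
    def display_name(self) -> str:
        return "Example Agent"

    @property
    def description(self) -> str:
        return "Illustrative agent that shows the required overrides."

    def get_system_prompt(self) -> str:
        return "You are a small example agent."

    def get_available_tools(self) -> List[str]:
        # Placeholder tool names; the real names live in code_puppy.tools
        return ["read_file", "edit_file"]


agent = ExampleAgent()
agent.append_to_message_history("hello")  # message history is now owned by the agent instance
print(agent.get_message_history())        # ['hello']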
{code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/add_command.py

@@ -8,7 +8,7 @@ import os
 from typing import List, Optional
 
 from code_puppy.messaging import emit_info
-from code_puppy.
+from code_puppy.tui_state import is_tui_mode
 
 from .base import MCPCommandBase
 from .wizard_utils import run_interactive_install_wizard
{code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/command_line/mcp/install_command.py

@@ -6,7 +6,7 @@ import logging
 from typing import List, Optional
 
 from code_puppy.messaging import emit_info
-from code_puppy.
+from code_puppy.tui_state import is_tui_mode
 
 from .base import MCPCommandBase
 from .wizard_utils import run_interactive_install_wizard
{code_puppy-0.0.172 → code_puppy-0.0.173}/code_puppy/main.py

@@ -29,7 +29,8 @@ from code_puppy.message_history_processor import (
     message_history_accumulator,
     prune_interrupted_tool_calls,
 )
-from code_puppy.state_management import
+from code_puppy.state_management import set_message_history
+from code_puppy.tui_state import is_tui_mode, set_tui_mode
 from code_puppy.tools.common import console
 from code_puppy.version_checker import default_version_mismatch_behavior
 
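The hunks above move is_tui_mode (and set_tui_mode) to the new code_puppy/tui_state.py module. Based only on those imports, usage presumably looks like the sketch below; the exact signatures in tui_state.py are an assumption, since the module body is not shown in this diff:

from code_puppy.tui_state import is_tui_mode, set_tui_mode

set_tui_mode(True)   # assumed: mark that the Textual UI is active
if is_tui_mode():
    print("running inside the TUI")
set_tui_mode(False)  # assumed: back to plain console mode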