code-puppy 0.0.169__py3-none-any.whl → 0.0.366__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/__init__.py +7 -1
- code_puppy/agents/__init__.py +8 -8
- code_puppy/agents/agent_c_reviewer.py +155 -0
- code_puppy/agents/agent_code_puppy.py +9 -2
- code_puppy/agents/agent_code_reviewer.py +90 -0
- code_puppy/agents/agent_cpp_reviewer.py +132 -0
- code_puppy/agents/agent_creator_agent.py +48 -9
- code_puppy/agents/agent_golang_reviewer.py +151 -0
- code_puppy/agents/agent_javascript_reviewer.py +160 -0
- code_puppy/agents/agent_manager.py +146 -199
- code_puppy/agents/agent_pack_leader.py +383 -0
- code_puppy/agents/agent_planning.py +163 -0
- code_puppy/agents/agent_python_programmer.py +165 -0
- code_puppy/agents/agent_python_reviewer.py +90 -0
- code_puppy/agents/agent_qa_expert.py +163 -0
- code_puppy/agents/agent_qa_kitten.py +208 -0
- code_puppy/agents/agent_security_auditor.py +181 -0
- code_puppy/agents/agent_terminal_qa.py +323 -0
- code_puppy/agents/agent_typescript_reviewer.py +166 -0
- code_puppy/agents/base_agent.py +1713 -1
- code_puppy/agents/event_stream_handler.py +350 -0
- code_puppy/agents/json_agent.py +12 -1
- code_puppy/agents/pack/__init__.py +34 -0
- code_puppy/agents/pack/bloodhound.py +304 -0
- code_puppy/agents/pack/husky.py +321 -0
- code_puppy/agents/pack/retriever.py +393 -0
- code_puppy/agents/pack/shepherd.py +348 -0
- code_puppy/agents/pack/terrier.py +287 -0
- code_puppy/agents/pack/watchdog.py +367 -0
- code_puppy/agents/prompt_reviewer.py +145 -0
- code_puppy/agents/subagent_stream_handler.py +276 -0
- code_puppy/api/__init__.py +13 -0
- code_puppy/api/app.py +169 -0
- code_puppy/api/main.py +21 -0
- code_puppy/api/pty_manager.py +446 -0
- code_puppy/api/routers/__init__.py +12 -0
- code_puppy/api/routers/agents.py +36 -0
- code_puppy/api/routers/commands.py +217 -0
- code_puppy/api/routers/config.py +74 -0
- code_puppy/api/routers/sessions.py +232 -0
- code_puppy/api/templates/terminal.html +361 -0
- code_puppy/api/websocket.py +154 -0
- code_puppy/callbacks.py +174 -4
- code_puppy/chatgpt_codex_client.py +283 -0
- code_puppy/claude_cache_client.py +586 -0
- code_puppy/cli_runner.py +916 -0
- code_puppy/command_line/add_model_menu.py +1079 -0
- code_puppy/command_line/agent_menu.py +395 -0
- code_puppy/command_line/attachments.py +395 -0
- code_puppy/command_line/autosave_menu.py +605 -0
- code_puppy/command_line/clipboard.py +527 -0
- code_puppy/command_line/colors_menu.py +520 -0
- code_puppy/command_line/command_handler.py +233 -627
- code_puppy/command_line/command_registry.py +150 -0
- code_puppy/command_line/config_commands.py +715 -0
- code_puppy/command_line/core_commands.py +792 -0
- code_puppy/command_line/diff_menu.py +863 -0
- code_puppy/command_line/load_context_completion.py +15 -22
- code_puppy/command_line/mcp/base.py +1 -4
- code_puppy/command_line/mcp/catalog_server_installer.py +175 -0
- code_puppy/command_line/mcp/custom_server_form.py +688 -0
- code_puppy/command_line/mcp/custom_server_installer.py +195 -0
- code_puppy/command_line/mcp/edit_command.py +148 -0
- code_puppy/command_line/mcp/handler.py +9 -4
- code_puppy/command_line/mcp/help_command.py +6 -5
- code_puppy/command_line/mcp/install_command.py +16 -27
- code_puppy/command_line/mcp/install_menu.py +685 -0
- code_puppy/command_line/mcp/list_command.py +3 -3
- code_puppy/command_line/mcp/logs_command.py +174 -65
- code_puppy/command_line/mcp/remove_command.py +2 -2
- code_puppy/command_line/mcp/restart_command.py +12 -4
- code_puppy/command_line/mcp/search_command.py +17 -11
- code_puppy/command_line/mcp/start_all_command.py +22 -13
- code_puppy/command_line/mcp/start_command.py +50 -31
- code_puppy/command_line/mcp/status_command.py +6 -7
- code_puppy/command_line/mcp/stop_all_command.py +11 -8
- code_puppy/command_line/mcp/stop_command.py +11 -10
- code_puppy/command_line/mcp/test_command.py +2 -2
- code_puppy/command_line/mcp/utils.py +1 -1
- code_puppy/command_line/mcp/wizard_utils.py +22 -18
- code_puppy/command_line/mcp_completion.py +174 -0
- code_puppy/command_line/model_picker_completion.py +89 -30
- code_puppy/command_line/model_settings_menu.py +884 -0
- code_puppy/command_line/motd.py +14 -8
- code_puppy/command_line/onboarding_slides.py +179 -0
- code_puppy/command_line/onboarding_wizard.py +340 -0
- code_puppy/command_line/pin_command_completion.py +329 -0
- code_puppy/command_line/prompt_toolkit_completion.py +626 -75
- code_puppy/command_line/session_commands.py +296 -0
- code_puppy/command_line/utils.py +54 -0
- code_puppy/config.py +1181 -51
- code_puppy/error_logging.py +118 -0
- code_puppy/gemini_code_assist.py +385 -0
- code_puppy/gemini_model.py +602 -0
- code_puppy/http_utils.py +220 -104
- code_puppy/keymap.py +128 -0
- code_puppy/main.py +5 -594
- code_puppy/{mcp → mcp_}/__init__.py +17 -0
- code_puppy/{mcp → mcp_}/async_lifecycle.py +35 -4
- code_puppy/{mcp → mcp_}/blocking_startup.py +70 -43
- code_puppy/{mcp → mcp_}/captured_stdio_server.py +2 -2
- code_puppy/{mcp → mcp_}/config_wizard.py +5 -5
- code_puppy/{mcp → mcp_}/dashboard.py +15 -6
- code_puppy/{mcp → mcp_}/examples/retry_example.py +4 -1
- code_puppy/{mcp → mcp_}/managed_server.py +66 -39
- code_puppy/{mcp → mcp_}/manager.py +146 -52
- code_puppy/mcp_/mcp_logs.py +224 -0
- code_puppy/{mcp → mcp_}/registry.py +6 -6
- code_puppy/{mcp → mcp_}/server_registry_catalog.py +25 -8
- code_puppy/messaging/__init__.py +199 -2
- code_puppy/messaging/bus.py +610 -0
- code_puppy/messaging/commands.py +167 -0
- code_puppy/messaging/markdown_patches.py +57 -0
- code_puppy/messaging/message_queue.py +17 -48
- code_puppy/messaging/messages.py +500 -0
- code_puppy/messaging/queue_console.py +1 -24
- code_puppy/messaging/renderers.py +43 -146
- code_puppy/messaging/rich_renderer.py +1027 -0
- code_puppy/messaging/spinner/__init__.py +33 -5
- code_puppy/messaging/spinner/console_spinner.py +92 -52
- code_puppy/messaging/spinner/spinner_base.py +29 -0
- code_puppy/messaging/subagent_console.py +461 -0
- code_puppy/model_factory.py +686 -80
- code_puppy/model_utils.py +167 -0
- code_puppy/models.json +86 -104
- code_puppy/models_dev_api.json +1 -0
- code_puppy/models_dev_parser.py +592 -0
- code_puppy/plugins/__init__.py +164 -10
- code_puppy/plugins/antigravity_oauth/__init__.py +10 -0
- code_puppy/plugins/antigravity_oauth/accounts.py +406 -0
- code_puppy/plugins/antigravity_oauth/antigravity_model.py +704 -0
- code_puppy/plugins/antigravity_oauth/config.py +42 -0
- code_puppy/plugins/antigravity_oauth/constants.py +136 -0
- code_puppy/plugins/antigravity_oauth/oauth.py +478 -0
- code_puppy/plugins/antigravity_oauth/register_callbacks.py +406 -0
- code_puppy/plugins/antigravity_oauth/storage.py +271 -0
- code_puppy/plugins/antigravity_oauth/test_plugin.py +319 -0
- code_puppy/plugins/antigravity_oauth/token.py +167 -0
- code_puppy/plugins/antigravity_oauth/transport.py +767 -0
- code_puppy/plugins/antigravity_oauth/utils.py +169 -0
- code_puppy/plugins/chatgpt_oauth/__init__.py +8 -0
- code_puppy/plugins/chatgpt_oauth/config.py +52 -0
- code_puppy/plugins/chatgpt_oauth/oauth_flow.py +328 -0
- code_puppy/plugins/chatgpt_oauth/register_callbacks.py +94 -0
- code_puppy/plugins/chatgpt_oauth/test_plugin.py +293 -0
- code_puppy/plugins/chatgpt_oauth/utils.py +489 -0
- code_puppy/plugins/claude_code_oauth/README.md +167 -0
- code_puppy/plugins/claude_code_oauth/SETUP.md +93 -0
- code_puppy/plugins/claude_code_oauth/__init__.py +6 -0
- code_puppy/plugins/claude_code_oauth/config.py +50 -0
- code_puppy/plugins/claude_code_oauth/register_callbacks.py +308 -0
- code_puppy/plugins/claude_code_oauth/test_plugin.py +283 -0
- code_puppy/plugins/claude_code_oauth/utils.py +518 -0
- code_puppy/plugins/customizable_commands/__init__.py +0 -0
- code_puppy/plugins/customizable_commands/register_callbacks.py +169 -0
- code_puppy/plugins/example_custom_command/README.md +280 -0
- code_puppy/plugins/example_custom_command/register_callbacks.py +51 -0
- code_puppy/plugins/file_permission_handler/__init__.py +4 -0
- code_puppy/plugins/file_permission_handler/register_callbacks.py +523 -0
- code_puppy/plugins/frontend_emitter/__init__.py +25 -0
- code_puppy/plugins/frontend_emitter/emitter.py +121 -0
- code_puppy/plugins/frontend_emitter/register_callbacks.py +261 -0
- code_puppy/plugins/oauth_puppy_html.py +228 -0
- code_puppy/plugins/shell_safety/__init__.py +6 -0
- code_puppy/plugins/shell_safety/agent_shell_safety.py +69 -0
- code_puppy/plugins/shell_safety/command_cache.py +156 -0
- code_puppy/plugins/shell_safety/register_callbacks.py +202 -0
- code_puppy/prompts/antigravity_system_prompt.md +1 -0
- code_puppy/prompts/codex_system_prompt.md +310 -0
- code_puppy/pydantic_patches.py +131 -0
- code_puppy/reopenable_async_client.py +8 -8
- code_puppy/round_robin_model.py +10 -15
- code_puppy/session_storage.py +294 -0
- code_puppy/status_display.py +21 -4
- code_puppy/summarization_agent.py +52 -14
- code_puppy/terminal_utils.py +418 -0
- code_puppy/tools/__init__.py +139 -6
- code_puppy/tools/agent_tools.py +548 -49
- code_puppy/tools/browser/__init__.py +37 -0
- code_puppy/tools/browser/browser_control.py +289 -0
- code_puppy/tools/browser/browser_interactions.py +545 -0
- code_puppy/tools/browser/browser_locators.py +640 -0
- code_puppy/tools/browser/browser_manager.py +316 -0
- code_puppy/tools/browser/browser_navigation.py +251 -0
- code_puppy/tools/browser/browser_screenshot.py +179 -0
- code_puppy/tools/browser/browser_scripts.py +462 -0
- code_puppy/tools/browser/browser_workflows.py +221 -0
- code_puppy/tools/browser/chromium_terminal_manager.py +259 -0
- code_puppy/tools/browser/terminal_command_tools.py +521 -0
- code_puppy/tools/browser/terminal_screenshot_tools.py +556 -0
- code_puppy/tools/browser/terminal_tools.py +525 -0
- code_puppy/tools/command_runner.py +941 -153
- code_puppy/tools/common.py +1146 -6
- code_puppy/tools/display.py +84 -0
- code_puppy/tools/file_modifications.py +288 -89
- code_puppy/tools/file_operations.py +352 -266
- code_puppy/tools/subagent_context.py +158 -0
- code_puppy/uvx_detection.py +242 -0
- code_puppy/version_checker.py +30 -11
- code_puppy-0.0.366.data/data/code_puppy/models.json +110 -0
- code_puppy-0.0.366.data/data/code_puppy/models_dev_api.json +1 -0
- {code_puppy-0.0.169.dist-info → code_puppy-0.0.366.dist-info}/METADATA +184 -67
- code_puppy-0.0.366.dist-info/RECORD +217 -0
- {code_puppy-0.0.169.dist-info → code_puppy-0.0.366.dist-info}/WHEEL +1 -1
- {code_puppy-0.0.169.dist-info → code_puppy-0.0.366.dist-info}/entry_points.txt +1 -0
- code_puppy/agent.py +0 -231
- code_puppy/agents/agent_orchestrator.json +0 -26
- code_puppy/agents/runtime_manager.py +0 -272
- code_puppy/command_line/mcp/add_command.py +0 -183
- code_puppy/command_line/meta_command_handler.py +0 -153
- code_puppy/message_history_processor.py +0 -490
- code_puppy/messaging/spinner/textual_spinner.py +0 -101
- code_puppy/state_management.py +0 -200
- code_puppy/tui/__init__.py +0 -10
- code_puppy/tui/app.py +0 -986
- code_puppy/tui/components/__init__.py +0 -21
- code_puppy/tui/components/chat_view.py +0 -550
- code_puppy/tui/components/command_history_modal.py +0 -218
- code_puppy/tui/components/copy_button.py +0 -139
- code_puppy/tui/components/custom_widgets.py +0 -63
- code_puppy/tui/components/human_input_modal.py +0 -175
- code_puppy/tui/components/input_area.py +0 -167
- code_puppy/tui/components/sidebar.py +0 -309
- code_puppy/tui/components/status_bar.py +0 -182
- code_puppy/tui/messages.py +0 -27
- code_puppy/tui/models/__init__.py +0 -8
- code_puppy/tui/models/chat_message.py +0 -25
- code_puppy/tui/models/command_history.py +0 -89
- code_puppy/tui/models/enums.py +0 -24
- code_puppy/tui/screens/__init__.py +0 -15
- code_puppy/tui/screens/help.py +0 -130
- code_puppy/tui/screens/mcp_install_wizard.py +0 -803
- code_puppy/tui/screens/settings.py +0 -290
- code_puppy/tui/screens/tools.py +0 -74
- code_puppy-0.0.169.data/data/code_puppy/models.json +0 -128
- code_puppy-0.0.169.dist-info/RECORD +0 -112
- /code_puppy/{mcp → mcp_}/circuit_breaker.py +0 -0
- /code_puppy/{mcp → mcp_}/error_isolation.py +0 -0
- /code_puppy/{mcp → mcp_}/health_monitor.py +0 -0
- /code_puppy/{mcp → mcp_}/retry_manager.py +0 -0
- /code_puppy/{mcp → mcp_}/status_tracker.py +0 -0
- /code_puppy/{mcp → mcp_}/system_tools.py +0 -0
- {code_puppy-0.0.169.dist-info → code_puppy-0.0.366.dist-info}/licenses/LICENSE +0 -0
code_puppy/message_history_processor.py
@@ -1,490 +0,0 @@
-import json
-import queue
-from typing import Any, List, Set, Tuple
-
-import pydantic
-from pydantic_ai.messages import ModelMessage, ModelRequest, TextPart, ToolCallPart
-
-from code_puppy.config import (
-    get_model_name,
-    get_protected_token_count,
-    get_compaction_threshold,
-    get_compaction_strategy,
-)
-from code_puppy.messaging import emit_error, emit_info, emit_warning
-from code_puppy.model_factory import ModelFactory
-from code_puppy.state_management import (
-    add_compacted_message_hash,
-    get_compacted_message_hashes,
-    get_message_history,
-    hash_message,
-    set_message_history,
-)
-from code_puppy.summarization_agent import run_summarization_sync
-
-# Protected tokens are now configurable via get_protected_token_count()
-# Default is 50000 but can be customized in ~/.code_puppy/puppy.cfg
-
-
-def stringify_message_part(part) -> str:
-    """
-    Convert a message part to a string representation for token estimation or other uses.
-
-    Args:
-        part: A message part that may contain content or be a tool call
-
-    Returns:
-        String representation of the message part
-    """
-    result = ""
-    if hasattr(part, "part_kind"):
-        result += part.part_kind + ": "
-    else:
-        result += str(type(part)) + ": "
-
-    # Handle content
-    if hasattr(part, "content") and part.content:
-        # Handle different content types
-        if isinstance(part.content, str):
-            result = part.content
-        elif isinstance(part.content, pydantic.BaseModel):
-            result = json.dumps(part.content.model_dump())
-        elif isinstance(part.content, dict):
-            result = json.dumps(part.content)
-        else:
-            result = str(part.content)
-
-    # Handle tool calls which may have additional token costs
-    # If part also has content, we'll process tool calls separately
-    if hasattr(part, "tool_name") and part.tool_name:
-        # Estimate tokens for tool name and parameters
-        tool_text = part.tool_name
-        if hasattr(part, "args"):
-            tool_text += f" {str(part.args)}"
-        result += tool_text
-
-    return result
-
-
-def estimate_tokens_for_message(message: ModelMessage) -> int:
-    """
-    Estimate the number of tokens in a message using len(message) - 4.
-    Simple and fast replacement for tiktoken.
-    """
-    total_tokens = 0
-
-    for part in message.parts:
-        part_str = stringify_message_part(part)
-        if part_str:
-            total_tokens += len(part_str)
-
-    return int(max(1, total_tokens) / 4)
-
-
-def filter_huge_messages(messages: List[ModelMessage]) -> List[ModelMessage]:
-    filtered = [m for m in messages if estimate_tokens_for_message(m) < 50000]
-    pruned = prune_interrupted_tool_calls(filtered)
-    return pruned
-
-
-def split_messages_for_protected_summarization(
-    messages: List[ModelMessage],
-) -> Tuple[List[ModelMessage], List[ModelMessage]]:
-    """
-    Split messages into two groups: messages to summarize and protected recent messages.
-
-    Returns:
-        Tuple of (messages_to_summarize, protected_messages)
-
-    The protected_messages are the most recent messages that total up to the configured protected token count.
-    The system message (first message) is always protected.
-    All other messages that don't fit in the protected zone will be summarized.
-    """
-    if len(messages) <= 1:  # Just system message or empty
-        return [], messages
-
-    # Always protect the system message (first message)
-    system_message = messages[0]
-    system_tokens = estimate_tokens_for_message(system_message)
-
-    if len(messages) == 1:
-        return [], messages
-
-    # Get the configured protected token count
-    protected_tokens_limit = get_protected_token_count()
-
-    # Calculate tokens for messages from most recent backwards (excluding system message)
-    protected_messages = []
-    protected_token_count = system_tokens  # Start with system message tokens
-
-    # Go backwards through non-system messages to find protected zone
-    for i in range(len(messages) - 1, 0, -1):  # Stop at 1, not 0 (skip system message)
-        message = messages[i]
-        message_tokens = estimate_tokens_for_message(message)
-
-        # If adding this message would exceed protected tokens, stop here
-        if protected_token_count + message_tokens > protected_tokens_limit:
-            break
-
-        protected_messages.insert(0, message)  # Insert at beginning to maintain order
-        protected_token_count += message_tokens
-
-    # Add system message at the beginning of protected messages
-    protected_messages.insert(0, system_message)
-
-    # Messages to summarize are everything between system message and protected zone
-    protected_start_idx = (
-        len(messages) - len(protected_messages) + 1
-    )  # +1 because system message is protected
-    messages_to_summarize = messages[
-        1:protected_start_idx
-    ]  # Start from 1 to skip system message
-
-    emit_info(
-        f"🔒 Protecting {len(protected_messages)} recent messages ({protected_token_count} tokens, limit: {protected_tokens_limit})"
-    )
-    emit_info(f"📝 Summarizing {len(messages_to_summarize)} older messages")
-
-    return messages_to_summarize, protected_messages
-
-
-def deduplicate_tool_returns(messages: List[ModelMessage]) -> List[ModelMessage]:
-    """
-    Remove duplicate tool returns while preserving the first occurrence for each tool_call_id.
-
-    This function identifies tool-return parts that share the same tool_call_id and
-    removes duplicates, keeping only the first return for each id. This prevents
-    conversation corruption from duplicate tool_result blocks.
-    """
-    if not messages:
-        return messages
-
-    seen_tool_returns: Set[str] = set()
-    deduplicated: List[ModelMessage] = []
-    removed_count = 0
-
-    for msg in messages:
-        # Check if this message has any parts we need to filter
-        if not hasattr(msg, "parts") or not msg.parts:
-            deduplicated.append(msg)
-            continue
-
-        # Filter parts within this message
-        filtered_parts = []
-        msg_had_duplicates = False
-
-        for part in msg.parts:
-            tool_call_id = getattr(part, "tool_call_id", None)
-            part_kind = getattr(part, "part_kind", None)
-
-            # Check if this is a tool-return part
-            if tool_call_id and part_kind in {
-                "tool-return",
-                "tool-result",
-                "tool_result",
-            }:
-                if tool_call_id in seen_tool_returns:
-                    # This is a duplicate return, skip it
-                    msg_had_duplicates = True
-                    removed_count += 1
-                    continue
-                else:
-                    # First occurrence of this return, keep it
-                    seen_tool_returns.add(tool_call_id)
-                    filtered_parts.append(part)
-            else:
-                # Not a tool return, always keep
-                filtered_parts.append(part)
-
-        # If we filtered out parts, create a new message with filtered parts
-        if msg_had_duplicates and filtered_parts:
-            # Create a new message with the same attributes but filtered parts
-            new_msg = type(msg)(parts=filtered_parts)
-            # Copy over other attributes if they exist
-            for attr_name in dir(msg):
-                if (
-                    not attr_name.startswith("_")
-                    and attr_name != "parts"
-                    and hasattr(msg, attr_name)
-                ):
-                    try:
-                        setattr(new_msg, attr_name, getattr(msg, attr_name))
-                    except (AttributeError, TypeError):
-                        # Skip attributes that can't be set
-                        pass
-            deduplicated.append(new_msg)
-        elif filtered_parts:  # No duplicates but has parts
-            deduplicated.append(msg)
-        # If no parts remain after filtering, drop the entire message
-
-    if removed_count > 0:
-        emit_warning(f"Removed {removed_count} duplicate tool-return part(s)")
-
-    return deduplicated
-
-
-def summarize_messages(
-    messages: List[ModelMessage], with_protection=True
-) -> Tuple[List[ModelMessage], List[ModelMessage]]:
-    """
-    Summarize messages while protecting recent messages up to PROTECTED_TOKENS.
-
-    Returns:
-        List of messages: [system_message, summary_of_old_messages, ...protected_recent_messages]
-    """
-    messages_to_summarize, protected_messages = messages, []
-    if with_protection:
-        messages_to_summarize, protected_messages = (
-            split_messages_for_protected_summarization(messages)
-        )
-
-    if not messages_to_summarize:
-        # Nothing to summarize, return protected messages as-is
-        return protected_messages, messages_to_summarize
-
-    instructions = (
-        "The input will be a log of Agentic AI steps that have been taken"
-        " as well as user queries, etc. Summarize the contents of these steps."
-        " The high level details should remain but the bulk of the content from tool-call"
-        " responses should be compacted and summarized. For example if you see a tool-call"
-        " reading a file, and the file contents are large, then in your summary you might just"
-        " write: * used read_file on space_invaders.cpp - contents removed."
-        "\n Make sure your result is a bulleted list of all steps and interactions."
-        "\n\nNOTE: This summary represents older conversation history. Recent messages are preserved separately."
-    )
-
-    try:
-        new_messages = run_summarization_sync(
-            instructions, message_history=messages_to_summarize
-        )
-        # Return: [system_message, summary, ...protected_recent_messages]
-        result = new_messages + protected_messages[1:]
-        return prune_interrupted_tool_calls(result), messages_to_summarize
-    except Exception as e:
-        emit_error(f"Summarization failed during compaction: {e}")
-        return messages, messages_to_summarize  # Return original messages on failure
-
-
-def summarize_message(message: ModelMessage) -> ModelMessage:
-    try:
-        # If the message looks like a system/instructions message, skip summarization
-        instructions = getattr(message, "instructions", None)
-        if instructions:
-            return message
-        # If any part is a tool call, skip summarization
-        for part in message.parts:
-            if isinstance(part, ToolCallPart) or getattr(part, "tool_name", None):
-                return message
-        # Build prompt from textual content parts
-        content_bits: List[str] = []
-        for part in message.parts:
-            s = stringify_message_part(part)
-            if s:
-                content_bits.append(s)
-        if not content_bits:
-            return message
-        prompt = "Please summarize the following user message:\n" + "\n".join(
-            content_bits
-        )
-        output_text = run_summarization_sync(prompt)
-        summarized = ModelRequest([TextPart(output_text)])
-        return summarized
-    except Exception as e:
-        emit_error(f"Summarization failed: {e}")
-        return message
-
-
-def get_model_context_length() -> int:
-    """
-    Get the context length for the currently configured model from models.json
-    """
-    model_configs = ModelFactory.load_config()
-    model_name = get_model_name()
-
-    # Get context length from model config
-    model_config = model_configs.get(model_name, {})
-    context_length = model_config.get("context_length", 128000)  # Default value
-
-    # Reserve 10% of context for response
-    return int(context_length)
-
-
-def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMessage]:
-    """
-    Remove any messages that participate in mismatched tool call sequences.
-
-    A mismatched tool call id is one that appears in a ToolCall (model/tool request)
-    without a corresponding tool return, or vice versa. We preserve original order
-    and only drop messages that contain parts referencing mismatched tool_call_ids.
-    """
-    if not messages:
-        return messages
-
-    tool_call_ids: Set[str] = set()
-    tool_return_ids: Set[str] = set()
-
-    # First pass: collect ids for calls vs returns
-    for msg in messages:
-        for part in getattr(msg, "parts", []) or []:
-            tool_call_id = getattr(part, "tool_call_id", None)
-            if not tool_call_id:
-                continue
-            # Heuristic: if it's an explicit ToolCallPart or has a tool_name/args,
-            # consider it a call; otherwise it's a return/result.
-            if part.part_kind == "tool-call":
-                tool_call_ids.add(tool_call_id)
-            else:
-                tool_return_ids.add(tool_call_id)
-
-    mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
-    if not mismatched:
-        return messages
-
-    pruned: List[ModelMessage] = []
-    dropped_count = 0
-    for msg in messages:
-        has_mismatched = False
-        for part in getattr(msg, "parts", []) or []:
-            tcid = getattr(part, "tool_call_id", None)
-            if tcid and tcid in mismatched:
-                has_mismatched = True
-                break
-        if has_mismatched:
-            dropped_count += 1
-            continue
-        pruned.append(msg)
-
-    if dropped_count:
-        emit_warning(
-            f"Pruned {dropped_count} message(s) with mismatched tool_call_id pairs"
-        )
-    return pruned
-
-
-def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
-    # First, prune any interrupted/mismatched tool-call conversations
-    total_current_tokens = sum(estimate_tokens_for_message(msg) for msg in messages)
-
-    model_max = get_model_context_length()
-
-    proportion_used = total_current_tokens / model_max
-
-    # Check if we're in TUI mode and can update the status bar
-    from code_puppy.state_management import get_tui_app_instance, is_tui_mode
-
-    if is_tui_mode():
-        tui_app = get_tui_app_instance()
-        if tui_app:
-            try:
-                # Update the status bar instead of emitting a chat message
-                status_bar = tui_app.query_one("StatusBar")
-                status_bar.update_token_info(
-                    total_current_tokens, model_max, proportion_used
-                )
-            except Exception as e:
-                emit_error(e)
-                # Fallback to chat message if status bar update fails
-                emit_info(
-                    f"\n[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f} [/bold white on blue] \n",
-                    message_group="token_context_status",
-                )
-        else:
-            # Fallback if no TUI app instance
-            emit_info(
-                f"\n[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f} [/bold white on blue] \n",
-                message_group="token_context_status",
-            )
-    else:
-        # Non-TUI mode - emit to console as before
-        emit_info(
-            f"\n[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f} [/bold white on blue] \n"
-        )
-    # Get the configured compaction threshold
-    compaction_threshold = get_compaction_threshold()
-
-    # Get the configured compaction strategy
-    compaction_strategy = get_compaction_strategy()
-
-    if proportion_used > compaction_threshold:
-        if compaction_strategy == "truncation":
-            # Use truncation instead of summarization
-            protected_tokens = get_protected_token_count()
-            result_messages = truncation(
-                filter_huge_messages(messages), protected_tokens
-            )
-            summarized_messages = []  # No summarization in truncation mode
-        else:
-            # Default to summarization
-            result_messages, summarized_messages = summarize_messages(
-                filter_huge_messages(messages)
-            )
-
-        final_token_count = sum(
-            estimate_tokens_for_message(msg) for msg in result_messages
-        )
-        # Update status bar with final token count if in TUI mode
-        if is_tui_mode():
-            tui_app = get_tui_app_instance()
-            if tui_app:
-                try:
-                    status_bar = tui_app.query_one("StatusBar")
-                    status_bar.update_token_info(
-                        final_token_count, model_max, final_token_count / model_max
-                    )
-                except Exception:
-                    emit_info(
-                        f"Final token count after processing: {final_token_count}",
-                        message_group="token_context_status",
-                    )
-            else:
-                emit_info(
-                    f"Final token count after processing: {final_token_count}",
-                    message_group="token_context_status",
-                )
-        else:
-            emit_info(f"Final token count after processing: {final_token_count}")
-        set_message_history(result_messages)
-        for m in summarized_messages:
-            add_compacted_message_hash(hash_message(m))
-        return result_messages
-    return messages
-
-
-def truncation(
-    messages: List[ModelMessage], protected_tokens: int
-) -> List[ModelMessage]:
-    emit_info("Truncating message history to manage token usage")
-    result = [messages[0]]  # Always keep the first message (system prompt)
-    num_tokens = 0
-    stack = queue.LifoQueue()
-
-    # Put messages in reverse order (most recent first) into the stack
-    # but break when we exceed protected_tokens
-    for idx, msg in enumerate(reversed(messages[1:])):  # Skip the first message
-        num_tokens += estimate_tokens_for_message(msg)
-        if num_tokens > protected_tokens:
-            break
-        stack.put(msg)
-
-    # Pop messages from stack to get them in chronological order
-    while not stack.empty():
-        result.append(stack.get())
-
-    result = prune_interrupted_tool_calls(result)
-    return result
-
-
-def message_history_accumulator(messages: List[Any]):
-    _message_history = get_message_history()
-    message_history_hashes = set([hash_message(m) for m in _message_history])
-    for msg in messages:
-        if (
-            hash_message(msg) not in message_history_hashes
-            and hash_message(msg) not in get_compacted_message_hashes()
-        ):
-            _message_history.append(msg)
-
-    # Apply message history trimming using the main processor
-    # This ensures we maintain global state while still managing context limits
-    message_history_processor(_message_history)
-    return get_message_history()
code_puppy/messaging/spinner/textual_spinner.py
@@ -1,101 +0,0 @@
-"""
-Textual spinner implementation for TUI mode.
-"""
-
-from textual.widgets import Static
-
-from .spinner_base import SpinnerBase
-
-
-class TextualSpinner(Static):
-    """A textual spinner widget based on the SimpleSpinnerWidget."""
-
-    # Use the frames from SpinnerBase
-    FRAMES = SpinnerBase.FRAMES
-
-    def __init__(self, **kwargs):
-        """Initialize the textual spinner."""
-        super().__init__("", **kwargs)
-        self._frame_index = 0
-        self._is_spinning = False
-        self._timer = None
-        self._paused = False
-        self._previous_state = ""
-
-        # Register this spinner for global management
-        from . import register_spinner
-
-        register_spinner(self)
-
-    def start_spinning(self):
-        """Start the spinner animation using Textual's timer system."""
-        if not self._is_spinning:
-            self._is_spinning = True
-            self._frame_index = 0
-            self.update_frame_display()
-            # Start the animation timer using Textual's timer system
-            self._timer = self.set_interval(0.10, self.update_frame_display)
-
-    def stop_spinning(self):
-        """Stop the spinner animation."""
-        self._is_spinning = False
-        if self._timer:
-            self._timer.stop()
-            self._timer = None
-        self.update("")
-
-        # Unregister this spinner from global management
-        from . import unregister_spinner
-
-        unregister_spinner(self)
-
-    def update_frame(self):
-        """Update to the next frame."""
-        if self._is_spinning:
-            self._frame_index = (self._frame_index + 1) % len(self.FRAMES)
-
-    def update_frame_display(self):
-        """Update the display with the current frame."""
-        if self._is_spinning:
-            self.update_frame()
-            current_frame = self.FRAMES[self._frame_index]
-
-            # Check if we're awaiting user input to determine which message to show
-            from code_puppy.tools.command_runner import is_awaiting_user_input
-
-            if is_awaiting_user_input():
-                # Show waiting message when waiting for user input
-                message = SpinnerBase.WAITING_MESSAGE
-            else:
-                # Show thinking message during normal processing
-                message = SpinnerBase.THINKING_MESSAGE
-
-            self.update(
-                f"[bold cyan]{message}[/bold cyan][bold cyan]{current_frame}[/bold cyan]"
-            )
-
-    def pause(self):
-        """Pause the spinner animation temporarily."""
-        if self._is_spinning and self._timer and not self._paused:
-            self._paused = True
-            self._timer.pause()
-            # Store current state but don't clear it completely
-            self._previous_state = self.renderable
-            self.update("")
-
-    def resume(self):
-        """Resume a paused spinner animation."""
-        # Check if we should show a spinner - don't resume if waiting for user input
-        from code_puppy.tools.command_runner import is_awaiting_user_input
-
-        if is_awaiting_user_input():
-            return  # Don't resume if waiting for user input
-
-        if self._is_spinning and self._timer and self._paused:
-            self._paused = False
-            self._timer.resume()
-            # Restore previous state instead of immediately updating display
-            if self._previous_state:
-                self.update(self._previous_state)
-            else:
-                self.update_frame_display()