code-puppy 0.0.171__py3-none-any.whl → 0.0.173__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/agent.py +8 -8
- code_puppy/agents/agent_creator_agent.py +0 -3
- code_puppy/agents/agent_qa_kitten.py +203 -0
- code_puppy/agents/base_agent.py +398 -2
- code_puppy/command_line/command_handler.py +68 -28
- code_puppy/command_line/mcp/add_command.py +2 -2
- code_puppy/command_line/mcp/base.py +1 -1
- code_puppy/command_line/mcp/install_command.py +2 -2
- code_puppy/command_line/mcp/list_command.py +1 -1
- code_puppy/command_line/mcp/search_command.py +1 -1
- code_puppy/command_line/mcp/start_all_command.py +1 -1
- code_puppy/command_line/mcp/status_command.py +2 -2
- code_puppy/command_line/mcp/stop_all_command.py +1 -1
- code_puppy/command_line/mcp/utils.py +1 -1
- code_puppy/command_line/mcp/wizard_utils.py +2 -2
- code_puppy/config.py +141 -12
- code_puppy/http_utils.py +50 -24
- code_puppy/main.py +2 -1
- code_puppy/{mcp → mcp_}/config_wizard.py +1 -1
- code_puppy/{mcp → mcp_}/examples/retry_example.py +1 -1
- code_puppy/{mcp → mcp_}/managed_server.py +1 -1
- code_puppy/{mcp → mcp_}/server_registry_catalog.py +1 -3
- code_puppy/message_history_processor.py +83 -221
- code_puppy/messaging/message_queue.py +4 -4
- code_puppy/state_management.py +1 -100
- code_puppy/tools/__init__.py +103 -6
- code_puppy/tools/browser/__init__.py +0 -0
- code_puppy/tools/browser/browser_control.py +293 -0
- code_puppy/tools/browser/browser_interactions.py +552 -0
- code_puppy/tools/browser/browser_locators.py +642 -0
- code_puppy/tools/browser/browser_navigation.py +251 -0
- code_puppy/tools/browser/browser_screenshot.py +242 -0
- code_puppy/tools/browser/browser_scripts.py +478 -0
- code_puppy/tools/browser/browser_workflows.py +196 -0
- code_puppy/tools/browser/camoufox_manager.py +194 -0
- code_puppy/tools/browser/vqa_agent.py +66 -0
- code_puppy/tools/browser_control.py +293 -0
- code_puppy/tools/browser_interactions.py +552 -0
- code_puppy/tools/browser_locators.py +642 -0
- code_puppy/tools/browser_navigation.py +251 -0
- code_puppy/tools/browser_screenshot.py +278 -0
- code_puppy/tools/browser_scripts.py +478 -0
- code_puppy/tools/browser_workflows.py +215 -0
- code_puppy/tools/camoufox_manager.py +150 -0
- code_puppy/tools/command_runner.py +13 -8
- code_puppy/tools/file_operations.py +7 -7
- code_puppy/tui/app.py +1 -1
- code_puppy/tui/components/custom_widgets.py +1 -1
- code_puppy/tui/screens/mcp_install_wizard.py +8 -8
- code_puppy/tui_state.py +55 -0
- {code_puppy-0.0.171.dist-info → code_puppy-0.0.173.dist-info}/METADATA +3 -1
- code_puppy-0.0.173.dist-info/RECORD +132 -0
- code_puppy-0.0.171.dist-info/RECORD +0 -112
- /code_puppy/{mcp → mcp_}/__init__.py +0 -0
- /code_puppy/{mcp → mcp_}/async_lifecycle.py +0 -0
- /code_puppy/{mcp → mcp_}/blocking_startup.py +0 -0
- /code_puppy/{mcp → mcp_}/captured_stdio_server.py +0 -0
- /code_puppy/{mcp → mcp_}/circuit_breaker.py +0 -0
- /code_puppy/{mcp → mcp_}/dashboard.py +0 -0
- /code_puppy/{mcp → mcp_}/error_isolation.py +0 -0
- /code_puppy/{mcp → mcp_}/health_monitor.py +0 -0
- /code_puppy/{mcp → mcp_}/manager.py +0 -0
- /code_puppy/{mcp → mcp_}/registry.py +0 -0
- /code_puppy/{mcp → mcp_}/retry_manager.py +0 -0
- /code_puppy/{mcp → mcp_}/status_tracker.py +0 -0
- /code_puppy/{mcp → mcp_}/system_tools.py +0 -0
- {code_puppy-0.0.171.data → code_puppy-0.0.173.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.171.dist-info → code_puppy-0.0.173.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.171.dist-info → code_puppy-0.0.173.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.171.dist-info → code_puppy-0.0.173.dist-info}/licenses/LICENSE +0 -0
@@ -11,7 +11,7 @@ from urllib.parse import urlparse
 
 from rich.console import Console
 
-from code_puppy.
+from code_puppy.mcp_.manager import ServerConfig, get_mcp_manager
 from code_puppy.messaging import (
     emit_error,
     emit_info,
@@ -17,7 +17,7 @@ from typing import Any
 project_root = Path(__file__).parents[3]
 sys.path.insert(0, str(project_root))
 
-from code_puppy.
+from code_puppy.mcp_.retry_manager import get_retry_manager, retry_mcp_call  # noqa: E402
 
 logger = logging.getLogger(__name__)
 
@@ -24,7 +24,7 @@ from pydantic_ai.mcp import (
 )
 
 from code_puppy.http_utils import create_async_client
-from code_puppy.
+from code_puppy.mcp_.blocking_startup import BlockingMCPServerStdio
 from code_puppy.messaging import emit_info
 
 # Configure logging
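The three import hunks above track the package rename from code_puppy.mcp to code_puppy.mcp_ shown in the file list. As a rough sketch (assuming the re-exported names are otherwise unchanged), code importing from the old package path would update like this:

```python
# Sketch only: the {mcp -> mcp_} rename means import paths gain a trailing underscore.
# 0.0.171 (old path, assumed): from code_puppy.mcp.manager import ServerConfig, get_mcp_manager
# 0.0.173 (new path, per the hunk above):
from code_puppy.mcp_.manager import ServerConfig, get_mcp_manager

manager = get_mcp_manager()  # assumed accessor for the process-wide MCP manager
```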
@@ -794,9 +794,7 @@ MCP_SERVER_REGISTRY: List[MCPServerTemplate] = [
         type="http",
         config={
             "url": "https://mcp.context7.com/mcp",
-
-                "Authorization": "Bearer $CONTEXT7_API_KEY"
-            }
+            "headers": {"Authorization": "Bearer $CONTEXT7_API_KEY"},
         },
         verified=True,
         popular=True,
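This hunk collapses the Context7 catalog entry's auth configuration into a single-line "headers" mapping inside config. A sketch of the resulting shape (only the keys visible in the hunk are confirmed; expansion of the $CONTEXT7_API_KEY placeholder at runtime is an assumption):

```python
# Sketch of the corrected Context7 server config, based on the hunk above.
context7_config = {
    "url": "https://mcp.context7.com/mcp",
    # The literal "$CONTEXT7_API_KEY" placeholder is presumably resolved from the environment.
    "headers": {"Authorization": "Bearer $CONTEXT7_API_KEY"},
}
```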
@@ -1,6 +1,6 @@
 import json
 import queue
-from typing import Any, List, Set, Tuple
+from typing import Any, List, Set, Tuple, Union
 
 import pydantic
 from pydantic_ai.messages import (
@@ -25,7 +25,6 @@ from code_puppy.state_management import (
     add_compacted_message_hash,
     get_compacted_message_hashes,
     get_message_history,
-    hash_message,
     set_message_history,
 )
 from code_puppy.summarization_agent import run_summarization_sync
@@ -44,34 +43,10 @@ def stringify_message_part(part) -> str:
     Returns:
         String representation of the message part
     """
-
-
-
-
-    result += str(type(part)) + ": "
-
-    # Handle content
-    if hasattr(part, "content") and part.content:
-        # Handle different content types
-        if isinstance(part.content, str):
-            result = part.content
-        elif isinstance(part.content, pydantic.BaseModel):
-            result = json.dumps(part.content.model_dump())
-        elif isinstance(part.content, dict):
-            result = json.dumps(part.content)
-        else:
-            result = str(part.content)
-
-    # Handle tool calls which may have additional token costs
-    # If part also has content, we'll process tool calls separately
-    if hasattr(part, "tool_name") and part.tool_name:
-        # Estimate tokens for tool name and parameters
-        tool_text = part.tool_name
-        if hasattr(part, "args"):
-            tool_text += f" {str(part.args)}"
-        result += tool_text
-
-    return result
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    return current_agent.stringify_message_part(part)
 
 
 def estimate_tokens_for_message(message: ModelMessage) -> int:
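This hunk, and most of the ones that follow, apply the same refactor to message_history_processor.py: each module-level helper becomes a thin wrapper that looks up the active agent and delegates to that agent's own implementation (the logic itself appears to have moved into agents/base_agent.py, which grows by roughly 400 lines in the file list). A minimal caller-side sketch, assuming a ModelMessage value is already in hand:

```python
from pydantic_ai.messages import ModelMessage

from code_puppy.agents.agent_manager import get_current_agent_config
from code_puppy.message_history_processor import estimate_tokens_for_message


def tokens_both_ways(message: ModelMessage) -> tuple[int, int]:
    # In 0.0.173 the module-level helper simply forwards to the active agent,
    # so these two calls should agree (sketch, not the full module).
    via_module = estimate_tokens_for_message(message)
    via_agent = get_current_agent_config().estimate_tokens_for_message(message)
    return via_module, via_agent
```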
@@ -79,61 +54,44 @@ def estimate_tokens_for_message(message: ModelMessage) -> int:
     Estimate the number of tokens in a message using len(message) - 4.
     Simple and fast replacement for tiktoken.
     """
-
-
-
-
-        if part_str:
-            total_tokens += len(part_str)
-
-    return int(max(1, total_tokens) / 4)
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    return current_agent.estimate_tokens_for_message(message)
 
 
 def filter_huge_messages(messages: List[ModelMessage]) -> List[ModelMessage]:
     if not messages:
         return []
 
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
     # Never drop the system prompt, even if it is extremely large.
     system_message, *rest = messages
     filtered_rest = [
-        m for m in rest if estimate_tokens_for_message(m) < 50000
+        m for m in rest if current_agent.estimate_tokens_for_message(m) < 50000
     ]
     return [system_message] + filtered_rest
 
 
 def _is_tool_call_part(part: Any) -> bool:
-
-
-
-
-    if part_kind == "tool-call":
-        return True
-
-    has_tool_name = getattr(part, "tool_name", None) is not None
-    has_args = getattr(part, "args", None) is not None
-    has_args_delta = getattr(part, "args_delta", None) is not None
-
-    return bool(has_tool_name and (has_args or has_args_delta))
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    return current_agent._is_tool_call_part(part)
 
 
 def _is_tool_return_part(part: Any) -> bool:
-
-
-
-
-    if part_kind in {"tool-return", "tool-result"}:
-        return True
-
-    if getattr(part, "tool_call_id", None) is None:
-        return False
-
-    has_content = getattr(part, "content", None) is not None
-    has_content_delta = getattr(part, "content_delta", None) is not None
-    return bool(has_content or has_content_delta)
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    return current_agent._is_tool_return_part(part)
 
 
 def split_messages_for_protected_summarization(
-    messages: List[ModelMessage],
+    messages: List[ModelMessage], with_protection: bool = True
 ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
     """
     Split messages into two groups: messages to summarize and protected recent messages.
@@ -150,7 +108,13 @@ def split_messages_for_protected_summarization(
 
     # Always protect the system message (first message)
     system_message = messages[0]
-
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    system_tokens = current_agent.estimate_tokens_for_message(system_message)
+
+    if not with_protection:
+        # If not protecting, summarize everything except the system message
+        return messages[1:], [system_message]
 
     if len(messages) == 1:
         return [], messages
@@ -165,7 +129,7 @@ def split_messages_for_protected_summarization(
     # Go backwards through non-system messages to find protected zone
     for i in range(len(messages) - 1, 0, -1):  # Stop at 1, not 0 (skip system message)
         message = messages[i]
-        message_tokens = estimate_tokens_for_message(message)
+        message_tokens = current_agent.estimate_tokens_for_message(message)
 
         # If adding this message would exceed protected tokens, stop here
         if protected_token_count + message_tokens > protected_tokens_limit:
|
|
|
192
156
|
return messages_to_summarize, protected_messages
|
|
193
157
|
|
|
194
158
|
|
|
195
|
-
def
|
|
159
|
+
def run_summarization_sync(
|
|
160
|
+
instructions: str,
|
|
161
|
+
message_history: List[ModelMessage],
|
|
162
|
+
) -> Union[List[ModelMessage], str]:
|
|
196
163
|
"""
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
This function identifies tool-return parts that share the same tool_call_id and
|
|
200
|
-
removes duplicates, keeping only the first return for each id. This prevents
|
|
201
|
-
conversation corruption from duplicate tool_result blocks.
|
|
164
|
+
Run summarization synchronously using the configured summarization agent.
|
|
165
|
+
This is exposed as a global function so tests can mock it.
|
|
202
166
|
"""
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
seen_tool_returns: Set[str] = set()
|
|
207
|
-
deduplicated: List[ModelMessage] = []
|
|
208
|
-
removed_count = 0
|
|
209
|
-
|
|
210
|
-
for msg in messages:
|
|
211
|
-
if not hasattr(msg, "parts") or not msg.parts:
|
|
212
|
-
deduplicated.append(msg)
|
|
213
|
-
continue
|
|
214
|
-
|
|
215
|
-
filtered_parts = []
|
|
216
|
-
msg_had_duplicates = False
|
|
217
|
-
|
|
218
|
-
for part in msg.parts:
|
|
219
|
-
tool_call_id = getattr(part, "tool_call_id", None)
|
|
220
|
-
if tool_call_id and _is_tool_return_part(part):
|
|
221
|
-
if tool_call_id in seen_tool_returns:
|
|
222
|
-
msg_had_duplicates = True
|
|
223
|
-
removed_count += 1
|
|
224
|
-
continue
|
|
225
|
-
seen_tool_returns.add(tool_call_id)
|
|
226
|
-
filtered_parts.append(part)
|
|
227
|
-
|
|
228
|
-
if not filtered_parts:
|
|
229
|
-
continue
|
|
230
|
-
|
|
231
|
-
if msg_had_duplicates:
|
|
232
|
-
new_msg = type(msg)(parts=filtered_parts)
|
|
233
|
-
for attr_name in dir(msg):
|
|
234
|
-
if (
|
|
235
|
-
not attr_name.startswith("_")
|
|
236
|
-
and attr_name != "parts"
|
|
237
|
-
and hasattr(msg, attr_name)
|
|
238
|
-
):
|
|
239
|
-
try:
|
|
240
|
-
setattr(new_msg, attr_name, getattr(msg, attr_name))
|
|
241
|
-
except (AttributeError, TypeError):
|
|
242
|
-
pass
|
|
243
|
-
deduplicated.append(new_msg)
|
|
244
|
-
else:
|
|
245
|
-
deduplicated.append(msg)
|
|
246
|
-
|
|
247
|
-
if removed_count > 0:
|
|
248
|
-
emit_warning(f"Removed {removed_count} duplicate tool-return part(s)")
|
|
249
|
-
|
|
250
|
-
return deduplicated
|
|
167
|
+
from code_puppy.summarization_agent import run_summarization_sync as _run_summarization_sync
|
|
168
|
+
return _run_summarization_sync(instructions, message_history)
|
|
251
169
|
|
|
252
170
|
|
|
253
171
|
def summarize_messages(
|
|
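The duplicate-tool-return deduplication helper removed here (presumably absorbed into the agent along with the other helpers) gives way to a module-level run_summarization_sync wrapper whose docstring says it exists so tests can mock it. A hedged sketch of such a test, assuming pytest's monkeypatch fixture and reusing the ModelRequest/TextPart construction seen elsewhere in this diff:

```python
from pydantic_ai.messages import ModelRequest, TextPart

import code_puppy.message_history_processor as mhp


def test_summarize_messages_with_stubbed_summarizer(monkeypatch):
    # Replace the module-level wrapper so no real summarization model is called.
    def fake_run_summarization_sync(instructions, message_history):
        return [ModelRequest([TextPart("summary stub")])]

    monkeypatch.setattr(mhp, "run_summarization_sync", fake_run_summarization_sync)
    # ...build a message list and call mhp.summarize_messages(...) here,
    # then assert the compacted history contains the stubbed summary.
```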
@@ -261,26 +179,22 @@ def summarize_messages(
     where compacted_messages always preserves the original system message
     as the first entry.
     """
-    messages_to_summarize: List[ModelMessage]
-    protected_messages: List[ModelMessage]
-
-    if with_protection:
-        messages_to_summarize, protected_messages = (
-            split_messages_for_protected_summarization(messages)
-        )
-    else:
-        messages_to_summarize = messages[1:] if messages else []
-        protected_messages = messages[:1]
-
     if not messages:
         return [], []
 
-
+    # Split messages into those to summarize and those to protect
+    messages_to_summarize, protected_messages = split_messages_for_protected_summarization(
+        messages, with_protection
+    )
 
+    # If nothing to summarize, return the original list
     if not messages_to_summarize:
-        # Nothing to summarize, so just return the original sequence
         return prune_interrupted_tool_calls(messages), []
 
+    # Get the system message (always the first message)
+    system_message = messages[0]
+
+    # Instructions for the summarization agent
     instructions = (
         "The input will be a log of Agentic AI steps that have been taken"
         " as well as user queries, etc. Summarize the contents of these steps."
@@ -293,6 +207,7 @@ def summarize_messages(
     )
 
     try:
+        # Use the global function so tests can mock it
         new_messages = run_summarization_sync(
             instructions, message_history=messages_to_summarize
         )
@@ -303,6 +218,7 @@ def summarize_messages(
         )
         new_messages = [ModelRequest([TextPart(str(new_messages))])]
 
+    # Construct compacted messages: system message + new summarized messages + protected tail
     compacted: List[ModelMessage] = [system_message] + list(new_messages)
 
     # Drop the system message from protected_messages because we already included it
@@ -317,47 +233,22 @@ def summarize_messages(
 
 
 def summarize_message(message: ModelMessage) -> ModelMessage:
-
-
-
-
-
-        # If any part is a tool call, skip summarization
-        for part in message.parts:
-            if isinstance(part, ToolCallPart) or getattr(part, "tool_name", None):
-                return message
-        # Build prompt from textual content parts
-        content_bits: List[str] = []
-        for part in message.parts:
-            s = stringify_message_part(part)
-            if s:
-                content_bits.append(s)
-        if not content_bits:
-            return message
-        prompt = "Please summarize the following user message:\n" + "\n".join(
-            content_bits
-        )
-        output_text = run_summarization_sync(prompt)
-        summarized = ModelRequest([TextPart(output_text)])
-        return summarized
-    except Exception as e:
-        emit_error(f"Summarization failed: {e}")
-        return message
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    return current_agent.summarize_message(message)
 
 
 def get_model_context_length() -> int:
     """
     Get the context length for the currently configured model from models.json
     """
-
-
-
-
-
-    context_length = model_config.get("context_length", 128000)  # Default value
-
-    # Reserve 10% of context for response
-    return int(context_length)
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    return current_agent.get_model_context_length()
 
 
 def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMessage]:
@@ -368,64 +259,30 @@ def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMessage]:
     without a corresponding tool return, or vice versa. We preserve original order
     and only drop messages that contain parts referencing mismatched tool_call_ids.
     """
-
-
-
-
-
-
-    # First pass: collect ids for calls vs returns
-    for msg in messages:
-        for part in getattr(msg, "parts", []) or []:
-            tool_call_id = getattr(part, "tool_call_id", None)
-            if not tool_call_id:
-                continue
-
-            if _is_tool_call_part(part) and not _is_tool_return_part(part):
-                tool_call_ids.add(tool_call_id)
-            elif _is_tool_return_part(part):
-                tool_return_ids.add(tool_call_id)
-
-    mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
-    if not mismatched:
-        return messages
-
-    pruned: List[ModelMessage] = []
-    dropped_count = 0
-    for msg in messages:
-        has_mismatched = False
-        for part in getattr(msg, "parts", []) or []:
-            tcid = getattr(part, "tool_call_id", None)
-            if tcid and tcid in mismatched:
-                has_mismatched = True
-                break
-        if has_mismatched:
-            dropped_count += 1
-            continue
-        pruned.append(msg)
-
-    if dropped_count:
-        emit_warning(
-            f"Pruned {dropped_count} message(s) with mismatched tool_call_id pairs"
-        )
-    return pruned
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    return current_agent.prune_interrupted_tool_calls(messages)
 
 
 def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
-
-
-    )
+    # Get current agent to use its methods
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    cleaned_history = current_agent.prune_interrupted_tool_calls(messages)
 
     total_current_tokens = sum(
-        estimate_tokens_for_message(msg) for msg in cleaned_history
+        current_agent.estimate_tokens_for_message(msg) for msg in cleaned_history
     )
 
-    model_max = get_model_context_length()
+    model_max = current_agent.get_model_context_length()
 
     proportion_used = total_current_tokens / model_max if model_max else 0
 
     # Check if we're in TUI mode and can update the status bar
-    from code_puppy.
+    from code_puppy.tui_state import get_tui_app_instance, is_tui_mode
 
     if is_tui_mode():
         tui_app = get_tui_app_instance()
@@ -461,7 +318,7 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
     compaction_strategy = get_compaction_strategy()
 
     if proportion_used > compaction_threshold:
-        filtered_history = filter_huge_messages(cleaned_history)
+        filtered_history = current_agent.filter_huge_messages(cleaned_history)
 
         if compaction_strategy == "truncation":
             protected_tokens = get_protected_token_count()
@@ -473,7 +330,7 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
             )
 
         final_token_count = sum(
-            estimate_tokens_for_message(msg) for msg in result_messages
+            current_agent.estimate_tokens_for_message(msg) for msg in result_messages
         )
         # Update status bar with final token count if in TUI mode
         if is_tui_mode():
@@ -498,7 +355,7 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
         emit_info(f"Final token count after processing: {final_token_count}")
         set_message_history(result_messages)
         for m in summarized_messages:
-            add_compacted_message_hash(hash_message(m))
+            add_compacted_message_hash(current_agent.hash_message(m))
         return result_messages
 
     set_message_history(cleaned_history)
@@ -531,11 +388,16 @@ def truncation(
 
 def message_history_accumulator(messages: List[Any]):
     existing_history = list(get_message_history())
-
+
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    seen_hashes = {current_agent.hash_message(message) for message in existing_history}
     compacted_hashes = get_compacted_message_hashes()
 
     for message in messages:
-        message_hash = hash_message(message)
+        message_hash = current_agent.hash_message(message)
         if message_hash in seen_hashes or message_hash in compacted_hashes:
            continue
         existing_history.append(message)
@@ -219,7 +219,7 @@ class MessageQueue:
         start_time = time.time()
 
         # Check if we're in TUI mode - if so, try to yield control to the event loop
-        from code_puppy.
+        from code_puppy.tui_state import is_tui_mode
 
         sleep_interval = 0.05 if is_tui_mode() else 0.1
 
@@ -243,7 +243,7 @@ class MessageQueue:
 
     def provide_prompt_response(self, prompt_id: str, response: str):
         """Provide a response to a human input request."""
-        from code_puppy.
+        from code_puppy.tui_state import is_tui_mode
 
         if is_tui_mode():
             print(f"[DEBUG] Providing response for {prompt_id}: {response[:20]}...")
@@ -337,7 +337,7 @@ def emit_system_message(content: Any, **metadata):
 
 def emit_divider(content: str = "[dim]" + "─" * 100 + "\n" + "[/dim]", **metadata):
     """Emit a divider line"""
-    from code_puppy.
+    from code_puppy.tui_state import is_tui_mode
 
     if not is_tui_mode():
         emit_message(MessageType.DIVIDER, content, **metadata)
@@ -347,7 +347,7 @@ def emit_divider(content: str = "[dim]" + "─" * 100 + "\n" + "[/dim]", **metadata):
 
 def emit_prompt(prompt_text: str, timeout: float = None) -> str:
     """Emit a human input request and wait for response."""
-    from code_puppy.
+    from code_puppy.tui_state import is_tui_mode
 
     # In interactive mode, use direct input instead of the queue system
     if not is_tui_mode():
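The four hunks above switch is_tui_mode (and, earlier, get_tui_app_instance) over to the new code_puppy.tui_state module listed in the file summary; the state_management.py section that follows shows the same helpers being deleted from their old home. A sketch of the updated call site for code that needs to know whether the TUI is active:

```python
# Sketch: TUI-state helpers are imported from the new module in 0.0.173.
from code_puppy.tui_state import get_tui_app_instance, is_tui_mode

if is_tui_mode():
    app = get_tui_app_instance()  # the running TUI app instance, or None (assumed)
```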
code_puppy/state_management.py CHANGED

@@ -1,11 +1,7 @@
-import json
 from types import ModuleType
 from typing import Any, List, Set
 
-import
-
-_tui_mode: bool = False
-_tui_app_instance: Any = None
+from code_puppy.messaging import emit_info
 
 
 def _require_agent_manager() -> ModuleType:
@@ -29,53 +25,6 @@ def get_compacted_message_hashes() -> Set[str]:
     return manager.get_current_agent_compacted_message_hashes()
 
 
-def set_tui_mode(enabled: bool) -> None:
-    """Set the global TUI mode state.
-
-    Args:
-        enabled: True if running in TUI mode, False otherwise
-    """
-    global _tui_mode
-    _tui_mode = enabled
-
-
-def is_tui_mode() -> bool:
-    """Check if the application is running in TUI mode.
-
-    Returns:
-        True if running in TUI mode, False otherwise
-    """
-    return _tui_mode
-
-
-def set_tui_app_instance(app_instance: Any) -> None:
-    """Set the global TUI app instance reference.
-
-    Args:
-        app_instance: The TUI app instance
-    """
-    global _tui_app_instance
-    _tui_app_instance = app_instance
-
-
-def get_tui_app_instance() -> Any:
-    """Get the current TUI app instance.
-
-    Returns:
-        The TUI app instance if available, None otherwise
-    """
-    return _tui_app_instance
-
-
-def get_tui_mode() -> bool:
-    """Get the current TUI mode state.
-
-    Returns:
-        True if running in TUI mode, False otherwise
-    """
-    return _tui_mode
-
-
 def get_message_history() -> List[Any]:
     """Get message history for the active agent."""
     manager = _require_agent_manager()
@@ -106,52 +55,4 @@ def extend_message_history(history: List[Any]) -> None:
     manager.extend_current_agent_message_history(history)
 
 
-def _stringify_part(part: Any) -> str:
-    """Create a stable string representation for a message part.
-
-    We deliberately ignore timestamps so identical content hashes the same even when
-    emitted at different times. This prevents status updates from blowing up the
-    history when they are repeated with new timestamps."""
-
-    attributes: List[str] = [part.__class__.__name__]
-
-    # Role/instructions help disambiguate parts that otherwise share content
-    if hasattr(part, "role") and part.role:
-        attributes.append(f"role={part.role}")
-    if hasattr(part, "instructions") and part.instructions:
-        attributes.append(f"instructions={part.instructions}")
-
-    if hasattr(part, "tool_call_id") and part.tool_call_id:
-        attributes.append(f"tool_call_id={part.tool_call_id}")
-
-    if hasattr(part, "tool_name") and part.tool_name:
-        attributes.append(f"tool_name={part.tool_name}")
-
-    content = getattr(part, "content", None)
-    if content is None:
-        attributes.append("content=None")
-    elif isinstance(content, str):
-        attributes.append(f"content={content}")
-    elif isinstance(content, pydantic.BaseModel):
-        attributes.append(f"content={json.dumps(content.model_dump(), sort_keys=True)}")
-    elif isinstance(content, dict):
-        attributes.append(f"content={json.dumps(content, sort_keys=True)}")
-    else:
-        attributes.append(f"content={repr(content)}")
-
-    return "|".join(attributes)
-
-
-def hash_message(message: Any) -> int:
-    """Create a stable hash for a model message that ignores timestamps."""
-    role = getattr(message, "role", None)
-    instructions = getattr(message, "instructions", None)
-    header_bits: List[str] = []
-    if role:
-        header_bits.append(f"role={role}")
-    if instructions:
-        header_bits.append(f"instructions={instructions}")
 
-    part_strings = [_stringify_part(part) for part in getattr(message, "parts", [])]
-    canonical = "||".join(header_bits + part_strings)
-    return hash(canonical)