code-puppy 0.0.172__py3-none-any.whl → 0.0.173__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
code_puppy/agent.py CHANGED
@@ -7,10 +7,7 @@ from pydantic_ai import Agent
 from pydantic_ai.settings import ModelSettings
 from pydantic_ai.usage import UsageLimits
 
-from code_puppy.message_history_processor import (
-    get_model_context_length,
-    message_history_accumulator,
-)
+from code_puppy.message_history_processor import message_history_accumulator
 from code_puppy.messaging.message_queue import (
     emit_error,
     emit_info,
@@ -167,7 +164,10 @@ def reload_code_generation_agent(message_group: str | None):
 
     # Configure model settings with max_tokens if set
     model_settings_dict = {"seed": 42}
-    output_tokens = max(2048, min(int(0.05 * get_model_context_length()) - 1024, 16384))
+    # Get current agent to use its method
+    from code_puppy.agents import get_current_agent_config
+    current_agent = get_current_agent_config()
+    output_tokens = max(2048, min(int(0.05 * current_agent.get_model_context_length()) - 1024, 16384))
     console.print(f"Max output tokens per message: {output_tokens}")
     model_settings_dict["max_tokens"] = output_tokens
 
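
A quick worked example of the clamp above, assuming the 128,000-token default context length that get_model_context_length falls back to (the numbers are illustrative, not part of the diff):

```python
# 5% of the context window, minus a 1024-token safety margin,
# clamped to the range [2048, 16384].
context_length = 128_000
output_tokens = max(2048, min(int(0.05 * context_length) - 1024, 16384))
print(output_tokens)  # 6400 - 1024 = 5376, already inside the clamp range
```

So a 128k-context model gets 5376 output tokens; anything under roughly 61k context hits the 2048 floor, and anything over roughly 348k hits the 16384 ceiling.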
code_puppy/agents/base_agent.py CHANGED
@@ -1,13 +1,26 @@
 """Base agent configuration class for defining agent properties."""
 
+import json
+import queue
 import uuid
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional, Set
+from typing import Any, Dict, List, Optional, Set, Tuple
+
+import pydantic
+from pydantic_ai.messages import (
+    ModelMessage,
+    ModelRequest,
+    TextPart,
+    ToolCallPart,
+    ToolCallPartDelta,
+    ToolReturn,
+    ToolReturnPart,
+)
 
 
 class BaseAgent(ABC):
     """Base class for all agent configurations."""
-
+
     def __init__(self):
         self.id = str(uuid.uuid4())
         self._message_history: List[Any] = []
@@ -123,3 +136,377 @@ class BaseAgent(ABC):
         """
         from ..config import get_agent_pinned_model
         return get_agent_pinned_model(self.name)
+
+    # Message history processing methods (moved from state_management.py and message_history_processor.py)
+    def _stringify_part(self, part: Any) -> str:
+        """Create a stable string representation for a message part.
+
+        We deliberately ignore timestamps so identical content hashes the same even when
+        emitted at different times. This prevents status updates from blowing up the
+        history when they are repeated with new timestamps."""
+
+        attributes: List[str] = [part.__class__.__name__]
+
+        # Role/instructions help disambiguate parts that otherwise share content
+        if hasattr(part, "role") and part.role:
+            attributes.append(f"role={part.role}")
+        if hasattr(part, "instructions") and part.instructions:
+            attributes.append(f"instructions={part.instructions}")
+
+        if hasattr(part, "tool_call_id") and part.tool_call_id:
+            attributes.append(f"tool_call_id={part.tool_call_id}")
+
+        if hasattr(part, "tool_name") and part.tool_name:
+            attributes.append(f"tool_name={part.tool_name}")
+
+        content = getattr(part, "content", None)
+        if content is None:
+            attributes.append("content=None")
+        elif isinstance(content, str):
+            attributes.append(f"content={content}")
+        elif isinstance(content, pydantic.BaseModel):
+            attributes.append(f"content={json.dumps(content.model_dump(), sort_keys=True)}")
+        elif isinstance(content, dict):
+            attributes.append(f"content={json.dumps(content, sort_keys=True)}")
+        else:
+            attributes.append(f"content={repr(content)}")
+        result = "|".join(attributes)
+        return result
+
+    def hash_message(self, message: Any) -> int:
+        """Create a stable hash for a model message that ignores timestamps."""
+        role = getattr(message, "role", None)
+        instructions = getattr(message, "instructions", None)
+        header_bits: List[str] = []
+        if role:
+            header_bits.append(f"role={role}")
+        if instructions:
+            header_bits.append(f"instructions={instructions}")
+
+        part_strings = [self._stringify_part(part) for part in getattr(message, "parts", [])]
+        canonical = "||".join(header_bits + part_strings)
+        return hash(canonical)
+
+    def stringify_message_part(self, part) -> str:
+        """
+        Convert a message part to a string representation for token estimation or other uses.
+
+        Args:
+            part: A message part that may contain content or be a tool call
+
+        Returns:
+            String representation of the message part
+        """
+        result = ""
+        if hasattr(part, "part_kind"):
+            result += part.part_kind + ": "
+        else:
+            result += str(type(part)) + ": "
+
+        # Handle content
+        if hasattr(part, "content") and part.content:
+            # Handle different content types
+            if isinstance(part.content, str):
+                result = part.content
+            elif isinstance(part.content, pydantic.BaseModel):
+                result = json.dumps(part.content.model_dump())
+            elif isinstance(part.content, dict):
+                result = json.dumps(part.content)
+            else:
+                result = str(part.content)
+
+        # Handle tool calls which may have additional token costs
+        # If part also has content, we'll process tool calls separately
+        if hasattr(part, "tool_name") and part.tool_name:
+            # Estimate tokens for tool name and parameters
+            tool_text = part.tool_name
+            if hasattr(part, "args"):
+                tool_text += f" {str(part.args)}"
+            result += tool_text
+
+        return result
+
+    def estimate_tokens_for_message(self, message: ModelMessage) -> int:
+        """
+        Estimate the number of tokens in a message using len(message) - 4.
+        Simple and fast replacement for tiktoken.
+        """
+        total_tokens = 0
+
+        for part in message.parts:
+            part_str = self.stringify_message_part(part)
+            if part_str:
+                total_tokens += len(part_str)
+
+        return int(max(1, total_tokens) / 4)
+
+    def _is_tool_call_part(self, part: Any) -> bool:
+        if isinstance(part, (ToolCallPart, ToolCallPartDelta)):
+            return True
+
+        part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
+        if part_kind == "tool-call":
+            return True
+
+        has_tool_name = getattr(part, "tool_name", None) is not None
+        has_args = getattr(part, "args", None) is not None
+        has_args_delta = getattr(part, "args_delta", None) is not None
+
+        return bool(has_tool_name and (has_args or has_args_delta))
+
+    def _is_tool_return_part(self, part: Any) -> bool:
+        if isinstance(part, (ToolReturnPart, ToolReturn)):
+            return True
+
+        part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
+        if part_kind in {"tool-return", "tool-result"}:
+            return True
+
+        if getattr(part, "tool_call_id", None) is None:
+            return False
+
+        has_content = getattr(part, "content", None) is not None
+        has_content_delta = getattr(part, "content_delta", None) is not None
+        return bool(has_content or has_content_delta)
+
+    def filter_huge_messages(self, messages: List[ModelMessage]) -> List[ModelMessage]:
+        if not messages:
+            return []
+
+        # Never drop the system prompt, even if it is extremely large.
+        system_message, *rest = messages
+        filtered_rest = [
+            m for m in rest if self.estimate_tokens_for_message(m) < 50000
+        ]
+        return [system_message] + filtered_rest
+
+    def split_messages_for_protected_summarization(
+        self,
+        messages: List[ModelMessage],
+    ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
+        """
+        Split messages into two groups: messages to summarize and protected recent messages.
+
+        Returns:
+            Tuple of (messages_to_summarize, protected_messages)
+
+        The protected_messages are the most recent messages that total up to the configured protected token count.
+        The system message (first message) is always protected.
+        All other messages that don't fit in the protected zone will be summarized.
+        """
+        if len(messages) <= 1:  # Just system message or empty
+            return [], messages
+
+        # Always protect the system message (first message)
+        system_message = messages[0]
+        system_tokens = self.estimate_tokens_for_message(system_message)
+
+        if len(messages) == 1:
+            return [], messages
+
+        # Get the configured protected token count
+        from ..config import get_protected_token_count
+        protected_tokens_limit = get_protected_token_count()
+
+        # Calculate tokens for messages from most recent backwards (excluding system message)
+        protected_messages = []
+        protected_token_count = system_tokens  # Start with system message tokens
+
+        # Go backwards through non-system messages to find protected zone
+        for i in range(len(messages) - 1, 0, -1):  # Stop at 1, not 0 (skip system message)
+            message = messages[i]
+            message_tokens = self.estimate_tokens_for_message(message)
+
+            # If adding this message would exceed protected tokens, stop here
+            if protected_token_count + message_tokens > protected_tokens_limit:
+                break
+
+            protected_messages.append(message)
+            protected_token_count += message_tokens
+
+        # Messages that were added while scanning backwards are currently in reverse order.
+        # Reverse them to restore chronological ordering, then prepend the system prompt.
+        protected_messages.reverse()
+        protected_messages.insert(0, system_message)
+
+        # Messages to summarize are everything between the system message and the
+        # protected tail zone we just constructed.
+        protected_start_idx = max(1, len(messages) - (len(protected_messages) - 1))
+        messages_to_summarize = messages[1:protected_start_idx]
+
+        # Emit info messages
+        from ..messaging import emit_info
+        emit_info(
+            f"🔒 Protecting {len(protected_messages)} recent messages ({protected_token_count} tokens, limit: {protected_tokens_limit})"
+        )
+        emit_info(f"📝 Summarizing {len(messages_to_summarize)} older messages")
+
+        return messages_to_summarize, protected_messages
+
+    def summarize_messages(
+        self,
+        messages: List[ModelMessage],
+        with_protection: bool = True
+    ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
+        """
+        Summarize messages while protecting recent messages up to PROTECTED_TOKENS.
+
+        Returns:
+            Tuple of (compacted_messages, summarized_source_messages)
+            where compacted_messages always preserves the original system message
+            as the first entry.
+        """
+        messages_to_summarize: List[ModelMessage]
+        protected_messages: List[ModelMessage]
+
+        if with_protection:
+            messages_to_summarize, protected_messages = (
+                self.split_messages_for_protected_summarization(messages)
+            )
+        else:
+            messages_to_summarize = messages[1:] if messages else []
+            protected_messages = messages[:1]
+
+        if not messages:
+            return [], []
+
+        system_message = messages[0]
+
+        if not messages_to_summarize:
+            # Nothing to summarize, so just return the original sequence
+            return self.prune_interrupted_tool_calls(messages), []
+
+        instructions = (
+            "The input will be a log of Agentic AI steps that have been taken"
+            " as well as user queries, etc. Summarize the contents of these steps."
+            " The high level details should remain but the bulk of the content from tool-call"
+            " responses should be compacted and summarized. For example if you see a tool-call"
+            " reading a file, and the file contents are large, then in your summary you might just"
+            " write: * used read_file on space_invaders.cpp - contents removed."
+            "\n Make sure your result is a bulleted list of all steps and interactions."
+            "\n\nNOTE: This summary represents older conversation history. Recent messages are preserved separately."
+        )
+
+        try:
+            from ..summarization_agent import run_summarization_sync
+            new_messages = run_summarization_sync(
+                instructions, message_history=messages_to_summarize
+            )
+
+            if not isinstance(new_messages, list):
+                from ..messaging import emit_warning
+                emit_warning(
+                    "Summarization agent returned non-list output; wrapping into message request"
+                )
+                new_messages = [ModelRequest([TextPart(str(new_messages))])]
+
+            compacted: List[ModelMessage] = [system_message] + list(new_messages)
+
+            # Drop the system message from protected_messages because we already included it
+            protected_tail = [msg for msg in protected_messages if msg is not system_message]
+
+            compacted.extend(protected_tail)
+
+            return self.prune_interrupted_tool_calls(compacted), messages_to_summarize
+        except Exception as e:
+            from ..messaging import emit_error
+            emit_error(f"Summarization failed during compaction: {e}")
+            return messages, []  # Return original messages on failure
+
+    def summarize_message(self, message: ModelMessage) -> ModelMessage:
+        try:
+            # If the message looks like a system/instructions message, skip summarization
+            instructions = getattr(message, "instructions", None)
+            if instructions:
+                return message
+            # If any part is a tool call, skip summarization
+            for part in message.parts:
+                if isinstance(part, ToolCallPart) or getattr(part, "tool_name", None):
+                    return message
+            # Build prompt from textual content parts
+            content_bits: List[str] = []
+            for part in message.parts:
+                s = self.stringify_message_part(part)
+                if s:
+                    content_bits.append(s)
+            if not content_bits:
+                return message
+            prompt = "Please summarize the following user message:\n" + "\n".join(
+                content_bits
+            )
+
+            from ..summarization_agent import run_summarization_sync
+            output_text = run_summarization_sync(prompt)
+            summarized = ModelRequest([TextPart(output_text)])
+            return summarized
+        except Exception as e:
+            from ..messaging import emit_error
+            emit_error(f"Summarization failed: {e}")
+            return message
+
+    def get_model_context_length(self) -> int:
+        """
+        Get the context length for the currently configured model from models.json
+        """
+        from ..config import get_model_name
+        from ..model_factory import ModelFactory
+
+        model_configs = ModelFactory.load_config()
+        model_name = get_model_name()
+
+        # Get context length from model config
+        model_config = model_configs.get(model_name, {})
+        context_length = model_config.get("context_length", 128000)  # Default value
+
+        return int(context_length)
+
+    def prune_interrupted_tool_calls(self, messages: List[ModelMessage]) -> List[ModelMessage]:
+        """
+        Remove any messages that participate in mismatched tool call sequences.
+
+        A mismatched tool call id is one that appears in a ToolCall (model/tool request)
+        without a corresponding tool return, or vice versa. We preserve original order
+        and only drop messages that contain parts referencing mismatched tool_call_ids.
+        """
+        if not messages:
+            return messages
+
+        tool_call_ids: Set[str] = set()
+        tool_return_ids: Set[str] = set()
+
+        # First pass: collect ids for calls vs returns
+        for msg in messages:
+            for part in getattr(msg, "parts", []) or []:
+                tool_call_id = getattr(part, "tool_call_id", None)
+                if not tool_call_id:
+                    continue
+
+                if self._is_tool_call_part(part) and not self._is_tool_return_part(part):
+                    tool_call_ids.add(tool_call_id)
+                elif self._is_tool_return_part(part):
+                    tool_return_ids.add(tool_call_id)
+
+        mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
+        if not mismatched:
+            return messages
+
+        pruned: List[ModelMessage] = []
+        dropped_count = 0
+        for msg in messages:
+            has_mismatched = False
+            for part in getattr(msg, "parts", []) or []:
+                tcid = getattr(part, "tool_call_id", None)
+                if tcid and tcid in mismatched:
+                    has_mismatched = True
+                    break
+            if has_mismatched:
+                dropped_count += 1
+                continue
+            pruned.append(msg)
+
+        if dropped_count:
+            from ..messaging import emit_warning
+            emit_warning(
+                f"Pruned {dropped_count} message(s) with mismatched tool_call_id pairs"
+            )
+        return pruned
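
The backward scan in split_messages_for_protected_summarization is the subtle part of the new BaseAgent API: it walks from the newest message toward the oldest, keeping messages while the protected-token budget (seeded with the system prompt's cost) holds out. A minimal standalone sketch, using plain ints as stand-in token counts rather than real ModelMessage objects (split_protected is a hypothetical helper, not from the package):

```python
# Sketch of the protected-zone split. token_counts[0] plays the system
# prompt, which is always protected; everything else competes for the
# remaining budget, newest first.
def split_protected(token_counts, protected_limit):
    protected = [0]                 # indices of protected messages
    budget = token_counts[0]        # system prompt spends budget first
    for i in range(len(token_counts) - 1, 0, -1):   # newest -> oldest
        if budget + token_counts[i] > protected_limit:
            break                   # protected zone is full
        protected.append(i)
        budget += token_counts[i]
    cutoff = min(protected[1:], default=len(token_counts))
    to_summarize = list(range(1, cutoff))
    return to_summarize, sorted(protected)

# 100-token system prompt, 250-token budget: only the two newest messages
# fit, so the middle two get summarized.
print(split_protected([100, 90, 80, 70, 60], 250))  # ([1, 2], [0, 3, 4])
```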
code_puppy/command_line/mcp/add_command.py CHANGED
@@ -8,7 +8,7 @@ import os
 from typing import List, Optional
 
 from code_puppy.messaging import emit_info
-from code_puppy.state_management import is_tui_mode
+from code_puppy.tui_state import is_tui_mode
 
 from .base import MCPCommandBase
 from .wizard_utils import run_interactive_install_wizard
code_puppy/command_line/mcp/install_command.py CHANGED
@@ -6,7 +6,7 @@ import logging
 from typing import List, Optional
 
 from code_puppy.messaging import emit_info
-from code_puppy.state_management import is_tui_mode
+from code_puppy.tui_state import is_tui_mode
 
 from .base import MCPCommandBase
 from .wizard_utils import run_interactive_install_wizard
code_puppy/config.py CHANGED
@@ -115,7 +115,6 @@ def get_config_keys():
     default_keys = [
         "yolo_mode",
         "model",
-        "vqa_model_name",
         "compaction_strategy",
         "protected_token_count",
         "compaction_threshold",
code_puppy/main.py CHANGED
@@ -29,7 +29,8 @@ from code_puppy.message_history_processor import (
     message_history_accumulator,
     prune_interrupted_tool_calls,
 )
-from code_puppy.state_management import is_tui_mode, set_message_history, set_tui_mode
+from code_puppy.state_management import set_message_history
+from code_puppy.tui_state import is_tui_mode, set_tui_mode
 from code_puppy.tools.common import console
 from code_puppy.version_checker import default_version_mismatch_behavior
 
code_puppy/message_history_processor.py CHANGED
@@ -1,6 +1,6 @@
 import json
 import queue
-from typing import Any, List, Set, Tuple
+from typing import Any, List, Set, Tuple, Union
 
 import pydantic
 from pydantic_ai.messages import (
@@ -25,7 +25,6 @@ from code_puppy.state_management import (
     add_compacted_message_hash,
     get_compacted_message_hashes,
     get_message_history,
-    hash_message,
     set_message_history,
 )
 from code_puppy.summarization_agent import run_summarization_sync
@@ -44,34 +43,10 @@ def stringify_message_part(part) -> str:
     Returns:
         String representation of the message part
     """
-    result = ""
-    if hasattr(part, "part_kind"):
-        result += part.part_kind + ": "
-    else:
-        result += str(type(part)) + ": "
-
-    # Handle content
-    if hasattr(part, "content") and part.content:
-        # Handle different content types
-        if isinstance(part.content, str):
-            result = part.content
-        elif isinstance(part.content, pydantic.BaseModel):
-            result = json.dumps(part.content.model_dump())
-        elif isinstance(part.content, dict):
-            result = json.dumps(part.content)
-        else:
-            result = str(part.content)
-
-    # Handle tool calls which may have additional token costs
-    # If part also has content, we'll process tool calls separately
-    if hasattr(part, "tool_name") and part.tool_name:
-        # Estimate tokens for tool name and parameters
-        tool_text = part.tool_name
-        if hasattr(part, "args"):
-            tool_text += f" {str(part.args)}"
-        result += tool_text
-
-    return result
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    return current_agent.stringify_message_part(part)
 
 
 def estimate_tokens_for_message(message: ModelMessage) -> int:
@@ -79,61 +54,44 @@ def estimate_tokens_for_message(message: ModelMessage) -> int:
     Estimate the number of tokens in a message using len(message) - 4.
     Simple and fast replacement for tiktoken.
     """
-    total_tokens = 0
-
-    for part in message.parts:
-        part_str = stringify_message_part(part)
-        if part_str:
-            total_tokens += len(part_str)
-
-    return int(max(1, total_tokens) / 4)
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    return current_agent.estimate_tokens_for_message(message)
 
 
 def filter_huge_messages(messages: List[ModelMessage]) -> List[ModelMessage]:
     if not messages:
         return []
 
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
     # Never drop the system prompt, even if it is extremely large.
     system_message, *rest = messages
     filtered_rest = [
-        m for m in rest if estimate_tokens_for_message(m) < 50000
+        m for m in rest if current_agent.estimate_tokens_for_message(m) < 50000
     ]
     return [system_message] + filtered_rest
 
 
 def _is_tool_call_part(part: Any) -> bool:
-    if isinstance(part, (ToolCallPart, ToolCallPartDelta)):
-        return True
-
-    part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
-    if part_kind == "tool-call":
-        return True
-
-    has_tool_name = getattr(part, "tool_name", None) is not None
-    has_args = getattr(part, "args", None) is not None
-    has_args_delta = getattr(part, "args_delta", None) is not None
-
-    return bool(has_tool_name and (has_args or has_args_delta))
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    return current_agent._is_tool_call_part(part)
 
 
 def _is_tool_return_part(part: Any) -> bool:
-    if isinstance(part, (ToolReturnPart, ToolReturn)):
-        return True
-
-    part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
-    if part_kind in {"tool-return", "tool-result"}:
-        return True
-
-    if getattr(part, "tool_call_id", None) is None:
-        return False
-
-    has_content = getattr(part, "content", None) is not None
-    has_content_delta = getattr(part, "content_delta", None) is not None
-    return bool(has_content or has_content_delta)
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    return current_agent._is_tool_return_part(part)
 
 
 def split_messages_for_protected_summarization(
-    messages: List[ModelMessage],
+    messages: List[ModelMessage], with_protection: bool = True
 ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
     """
     Split messages into two groups: messages to summarize and protected recent messages.
@@ -150,7 +108,13 @@ def split_messages_for_protected_summarization(
 
     # Always protect the system message (first message)
     system_message = messages[0]
-    system_tokens = estimate_tokens_for_message(system_message)
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+    system_tokens = current_agent.estimate_tokens_for_message(system_message)
+
+    if not with_protection:
+        # If not protecting, summarize everything except the system message
+        return messages[1:], [system_message]
 
     if len(messages) == 1:
         return [], messages
@@ -165,7 +129,7 @@ def split_messages_for_protected_summarization(
     # Go backwards through non-system messages to find protected zone
     for i in range(len(messages) - 1, 0, -1):  # Stop at 1, not 0 (skip system message)
         message = messages[i]
-        message_tokens = estimate_tokens_for_message(message)
+        message_tokens = current_agent.estimate_tokens_for_message(message)
 
         # If adding this message would exceed protected tokens, stop here
         if protected_token_count + message_tokens > protected_tokens_limit:
@@ -192,6 +156,18 @@ def split_messages_for_protected_summarization(
     return messages_to_summarize, protected_messages
 
 
+def run_summarization_sync(
+    instructions: str,
+    message_history: List[ModelMessage],
+) -> Union[List[ModelMessage], str]:
+    """
+    Run summarization synchronously using the configured summarization agent.
+    This is exposed as a global function so tests can mock it.
+    """
+    from code_puppy.summarization_agent import run_summarization_sync as _run_summarization_sync
+    return _run_summarization_sync(instructions, message_history)
+
+
 def summarize_messages(
     messages: List[ModelMessage], with_protection: bool = True
 ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
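
The new module-level run_summarization_sync wrapper shadows the imported name so tests can swap the summarizer without touching the real agent (its own docstring says it is "exposed as a global function so tests can mock it"). A hedged sketch of that seam, assuming pytest's monkeypatch fixture; the test itself is illustrative, not from the package:

```python
import code_puppy.message_history_processor as mhp
from pydantic_ai.messages import ModelRequest, TextPart

def test_summarizer_is_patchable(monkeypatch):
    # Stand-in summarizer: returns a canned one-message "summary".
    stub = lambda instructions, message_history: [ModelRequest([TextPart("summary")])]
    monkeypatch.setattr(mhp, "run_summarization_sync", stub)
    # summarize_messages calls the module-level name, so compaction
    # now flows through the stub instead of the summarization agent.
    out = mhp.run_summarization_sync("instructions", message_history=[])
    assert out[0].parts[0].content == "summary"
```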
@@ -203,26 +179,22 @@ def summarize_messages(
     where compacted_messages always preserves the original system message
     as the first entry.
     """
-    messages_to_summarize: List[ModelMessage]
-    protected_messages: List[ModelMessage]
-
-    if with_protection:
-        messages_to_summarize, protected_messages = (
-            split_messages_for_protected_summarization(messages)
-        )
-    else:
-        messages_to_summarize = messages[1:] if messages else []
-        protected_messages = messages[:1]
-
     if not messages:
         return [], []
 
-    system_message = messages[0]
+    # Split messages into those to summarize and those to protect
+    messages_to_summarize, protected_messages = split_messages_for_protected_summarization(
+        messages, with_protection
+    )
 
+    # If nothing to summarize, return the original list
     if not messages_to_summarize:
-        # Nothing to summarize, so just return the original sequence
         return prune_interrupted_tool_calls(messages), []
 
+    # Get the system message (always the first message)
+    system_message = messages[0]
+
+    # Instructions for the summarization agent
     instructions = (
         "The input will be a log of Agentic AI steps that have been taken"
         " as well as user queries, etc. Summarize the contents of these steps."
@@ -235,6 +207,7 @@ def summarize_messages(
     )
 
     try:
+        # Use the global function so tests can mock it
         new_messages = run_summarization_sync(
             instructions, message_history=messages_to_summarize
         )
@@ -245,6 +218,7 @@ def summarize_messages(
             )
             new_messages = [ModelRequest([TextPart(str(new_messages))])]
 
+        # Construct compacted messages: system message + new summarized messages + protected tail
         compacted: List[ModelMessage] = [system_message] + list(new_messages)
 
         # Drop the system message from protected_messages because we already included it
@@ -259,47 +233,22 @@ def summarize_messages(
 
 
 def summarize_message(message: ModelMessage) -> ModelMessage:
-    try:
-        # If the message looks like a system/instructions message, skip summarization
-        instructions = getattr(message, "instructions", None)
-        if instructions:
-            return message
-        # If any part is a tool call, skip summarization
-        for part in message.parts:
-            if isinstance(part, ToolCallPart) or getattr(part, "tool_name", None):
-                return message
-        # Build prompt from textual content parts
-        content_bits: List[str] = []
-        for part in message.parts:
-            s = stringify_message_part(part)
-            if s:
-                content_bits.append(s)
-        if not content_bits:
-            return message
-        prompt = "Please summarize the following user message:\n" + "\n".join(
-            content_bits
-        )
-        output_text = run_summarization_sync(prompt)
-        summarized = ModelRequest([TextPart(output_text)])
-        return summarized
-    except Exception as e:
-        emit_error(f"Summarization failed: {e}")
-        return message
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    return current_agent.summarize_message(message)
 
 
 def get_model_context_length() -> int:
     """
     Get the context length for the currently configured model from models.json
     """
-    model_configs = ModelFactory.load_config()
-    model_name = get_model_name()
-
-    # Get context length from model config
-    model_config = model_configs.get(model_name, {})
-    context_length = model_config.get("context_length", 128000)  # Default value
-
-    # Reserve 10% of context for response
-    return int(context_length)
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    return current_agent.get_model_context_length()
 
 
 def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMessage]:
@@ -310,62 +259,30 @@ def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMess
     without a corresponding tool return, or vice versa. We preserve original order
     and only drop messages that contain parts referencing mismatched tool_call_ids.
     """
-    if not messages:
-        return messages
-
-    tool_call_ids: Set[str] = set()
-    tool_return_ids: Set[str] = set()
-
-    # First pass: collect ids for calls vs returns
-    for msg in messages:
-        for part in getattr(msg, "parts", []) or []:
-            tool_call_id = getattr(part, "tool_call_id", None)
-            if not tool_call_id:
-                continue
-
-            if _is_tool_call_part(part) and not _is_tool_return_part(part):
-                tool_call_ids.add(tool_call_id)
-            elif _is_tool_return_part(part):
-                tool_return_ids.add(tool_call_id)
-
-    mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
-    if not mismatched:
-        return messages
-
-    pruned: List[ModelMessage] = []
-    dropped_count = 0
-    for msg in messages:
-        has_mismatched = False
-        for part in getattr(msg, "parts", []) or []:
-            tcid = getattr(part, "tool_call_id", None)
-            if tcid and tcid in mismatched:
-                has_mismatched = True
-                break
-        if has_mismatched:
-            dropped_count += 1
-            continue
-        pruned.append(msg)
-
-    if dropped_count:
-        emit_warning(
-            f"Pruned {dropped_count} message(s) with mismatched tool_call_id pairs"
-        )
-    return pruned
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    return current_agent.prune_interrupted_tool_calls(messages)
 
 
 def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
-    cleaned_history = prune_interrupted_tool_calls(messages)
+    # Get current agent to use its methods
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    cleaned_history = current_agent.prune_interrupted_tool_calls(messages)
 
     total_current_tokens = sum(
-        estimate_tokens_for_message(msg) for msg in cleaned_history
+        current_agent.estimate_tokens_for_message(msg) for msg in cleaned_history
     )
 
-    model_max = get_model_context_length()
+    model_max = current_agent.get_model_context_length()
 
     proportion_used = total_current_tokens / model_max if model_max else 0
 
     # Check if we're in TUI mode and can update the status bar
-    from code_puppy.state_management import get_tui_app_instance, is_tui_mode
+    from code_puppy.tui_state import get_tui_app_instance, is_tui_mode
 
     if is_tui_mode():
         tui_app = get_tui_app_instance()
@@ -401,7 +318,7 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage
     compaction_strategy = get_compaction_strategy()
 
     if proportion_used > compaction_threshold:
-        filtered_history = filter_huge_messages(cleaned_history)
+        filtered_history = current_agent.filter_huge_messages(cleaned_history)
 
         if compaction_strategy == "truncation":
             protected_tokens = get_protected_token_count()
@@ -413,7 +330,7 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage
         )
 
         final_token_count = sum(
-            estimate_tokens_for_message(msg) for msg in result_messages
+            current_agent.estimate_tokens_for_message(msg) for msg in result_messages
         )
         # Update status bar with final token count if in TUI mode
         if is_tui_mode():
@@ -438,7 +355,7 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage
         emit_info(f"Final token count after processing: {final_token_count}")
         set_message_history(result_messages)
         for m in summarized_messages:
-            add_compacted_message_hash(hash_message(m))
+            add_compacted_message_hash(current_agent.hash_message(m))
         return result_messages
 
     set_message_history(cleaned_history)
@@ -471,11 +388,16 @@ def truncation(
 
 def message_history_accumulator(messages: List[Any]):
     existing_history = list(get_message_history())
-    seen_hashes = {hash_message(message) for message in existing_history}
+
+    # Get current agent to use its method
+    from code_puppy.agents.agent_manager import get_current_agent_config
+    current_agent = get_current_agent_config()
+
+    seen_hashes = {current_agent.hash_message(message) for message in existing_history}
     compacted_hashes = get_compacted_message_hashes()
 
     for message in messages:
-        message_hash = hash_message(message)
+        message_hash = current_agent.hash_message(message)
        if message_hash in seen_hashes or message_hash in compacted_hashes:
             continue
         existing_history.append(message)
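
The accumulator's dedup rests on the timestamp-insensitive hash_message: a status line re-emitted with a fresh timestamp hashes identically and is skipped. A simplified standalone sketch of the idea (unlike the real accumulator, this one also dedups within the incoming batch):

```python
# Content-based dedup: seen hashes gate what enters the history.
def accumulate(history, incoming, hash_fn):
    seen = {hash_fn(m) for m in history}
    for m in incoming:
        h = hash_fn(m)
        if h in seen:
            continue   # already accumulated (or previously compacted)
        history.append(m)
        seen.add(h)
    return history

# Two "status" messages with identical content collapse to one entry,
# because the hash covers content only, never arrival time.
msgs = [("status", "indexing repo..."), ("status", "indexing repo...")]
print(len(accumulate([], msgs, hash)))  # 1
```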
code_puppy/messaging/message_queue.py CHANGED
@@ -219,7 +219,7 @@ class MessageQueue:
         start_time = time.time()
 
         # Check if we're in TUI mode - if so, try to yield control to the event loop
-        from code_puppy.state_management import is_tui_mode
+        from code_puppy.tui_state import is_tui_mode
 
         sleep_interval = 0.05 if is_tui_mode() else 0.1
 
@@ -243,7 +243,7 @@ class MessageQueue:
 
     def provide_prompt_response(self, prompt_id: str, response: str):
         """Provide a response to a human input request."""
-        from code_puppy.state_management import is_tui_mode
+        from code_puppy.tui_state import is_tui_mode
 
         if is_tui_mode():
             print(f"[DEBUG] Providing response for {prompt_id}: {response[:20]}...")
@@ -337,7 +337,7 @@ def emit_system_message(content: Any, **metadata):
 
 def emit_divider(content: str = "[dim]" + "─" * 100 + "\n" + "[/dim]", **metadata):
     """Emit a divider line"""
-    from code_puppy.state_management import is_tui_mode
+    from code_puppy.tui_state import is_tui_mode
 
     if not is_tui_mode():
         emit_message(MessageType.DIVIDER, content, **metadata)
@@ -347,7 +347,7 @@ def emit_divider(content: str = "[dim]" + "─" * 100 + "\n" + "[/dim]", **metad
 
 def emit_prompt(prompt_text: str, timeout: float = None) -> str:
     """Emit a human input request and wait for response."""
-    from code_puppy.state_management import is_tui_mode
+    from code_puppy.tui_state import is_tui_mode
 
     # In interactive mode, use direct input instead of the queue system
     if not is_tui_mode():
code_puppy/state_management.py CHANGED
@@ -1,14 +1,8 @@
-import json
 from types import ModuleType
 from typing import Any, List, Set
 
-import pydantic
-
 from code_puppy.messaging import emit_info
 
-_tui_mode: bool = False
-_tui_app_instance: Any = None
-
 
 def _require_agent_manager() -> ModuleType:
     """Import the agent manager module, raising if it is unavailable."""
@@ -31,53 +25,6 @@ def get_compacted_message_hashes() -> Set[str]:
     return manager.get_current_agent_compacted_message_hashes()
 
 
-def set_tui_mode(enabled: bool) -> None:
-    """Set the global TUI mode state.
-
-    Args:
-        enabled: True if running in TUI mode, False otherwise
-    """
-    global _tui_mode
-    _tui_mode = enabled
-
-
-def is_tui_mode() -> bool:
-    """Check if the application is running in TUI mode.
-
-    Returns:
-        True if running in TUI mode, False otherwise
-    """
-    return _tui_mode
-
-
-def set_tui_app_instance(app_instance: Any) -> None:
-    """Set the global TUI app instance reference.
-
-    Args:
-        app_instance: The TUI app instance
-    """
-    global _tui_app_instance
-    _tui_app_instance = app_instance
-
-
-def get_tui_app_instance() -> Any:
-    """Get the current TUI app instance.
-
-    Returns:
-        The TUI app instance if available, None otherwise
-    """
-    return _tui_app_instance
-
-
-def get_tui_mode() -> bool:
-    """Get the current TUI mode state.
-
-    Returns:
-        True if running in TUI mode, False otherwise
-    """
-    return _tui_mode
-
-
 def get_message_history() -> List[Any]:
     """Get message history for the active agent."""
     manager = _require_agent_manager()
@@ -108,52 +55,4 @@ def extend_message_history(history: List[Any]) -> None:
     manager.extend_current_agent_message_history(history)
 
 
-def _stringify_part(part: Any) -> str:
-    """Create a stable string representation for a message part.
-
-    We deliberately ignore timestamps so identical content hashes the same even when
-    emitted at different times. This prevents status updates from blowing up the
-    history when they are repeated with new timestamps."""
-
-    attributes: List[str] = [part.__class__.__name__]
-
-    # Role/instructions help disambiguate parts that otherwise share content
-    if hasattr(part, "role") and part.role:
-        attributes.append(f"role={part.role}")
-    if hasattr(part, "instructions") and part.instructions:
-        attributes.append(f"instructions={part.instructions}")
-
-    if hasattr(part, "tool_call_id") and part.tool_call_id:
-        attributes.append(f"tool_call_id={part.tool_call_id}")
-
-    if hasattr(part, "tool_name") and part.tool_name:
-        attributes.append(f"tool_name={part.tool_name}")
-
-    content = getattr(part, "content", None)
-    if content is None:
-        attributes.append("content=None")
-    elif isinstance(content, str):
-        attributes.append(f"content={content}")
-    elif isinstance(content, pydantic.BaseModel):
-        attributes.append(f"content={json.dumps(content.model_dump(), sort_keys=True)}")
-    elif isinstance(content, dict):
-        attributes.append(f"content={json.dumps(content, sort_keys=True)}")
-    else:
-        attributes.append(f"content={repr(content)}")
-    result = "|".join(attributes)
-    return result
-
-
-def hash_message(message: Any) -> int:
-    """Create a stable hash for a model message that ignores timestamps."""
-    role = getattr(message, "role", None)
-    instructions = getattr(message, "instructions", None)
-    header_bits: List[str] = []
-    if role:
-        header_bits.append(f"role={role}")
-    if instructions:
-        header_bits.append(f"instructions={instructions}")
-
-    part_strings = [_stringify_part(part) for part in getattr(message, "parts", [])]
-    canonical = "||".join(header_bits + part_strings)
-    return hash(canonical)
+
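
The hashing helpers removed here now live on BaseAgent (see the base_agent.py hunk above), but the property they provide is worth spelling out: anything timestamp-like is simply left out of the canonical string, so re-emissions hash identically. A minimal demonstration with hypothetical stand-in types, not the real pydantic_ai parts:

```python
from dataclasses import dataclass, field
import time

@dataclass
class Part:
    content: str
    timestamp: float = field(default_factory=time.time)  # never hashed

def stringify(part: Part) -> str:
    # Mirrors the canonical-string idea: class name + content, no timestamp.
    return f"{part.__class__.__name__}|content={part.content}"

def hash_message(parts: list) -> int:
    return hash("||".join(stringify(p) for p in parts))

a, b = Part("deploy finished"), Part("deploy finished")
# Different arrival times, identical hash: duplicates are detectable.
assert hash_message([a]) == hash_message([b])
```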
code_puppy/tools/command_runner.py CHANGED
@@ -19,7 +19,7 @@ from code_puppy.messaging import (
     emit_system_message,
     emit_warning,
 )
-from code_puppy.state_management import is_tui_mode
+from code_puppy.tui_state import is_tui_mode
 from code_puppy.tools.common import generate_group_id
 
 # Maximum line length for shell command output to prevent massive token usage
code_puppy/tui/app.py CHANGED
@@ -149,7 +149,7 @@ class CodePuppyTUI(App):
     def on_mount(self) -> None:
         """Initialize the application when mounted."""
         # Register this app instance for global access
-        from code_puppy.state_management import set_tui_app_instance
+        from code_puppy.tui_state import set_tui_app_instance
 
         set_tui_app_instance(self)
 
code_puppy/tui_state.py ADDED
@@ -0,0 +1,55 @@
+# TUI State Management
+# This module contains functions for managing the global TUI state
+
+from typing import Any
+
+# Global TUI state variables
+_tui_mode: bool = False
+_tui_app_instance: Any = None
+
+
+def set_tui_mode(enabled: bool) -> None:
+    """Set the global TUI mode state.
+
+    Args:
+        enabled: True if running in TUI mode, False otherwise
+    """
+    global _tui_mode
+    _tui_mode = enabled
+
+
+def is_tui_mode() -> bool:
+    """Check if the application is running in TUI mode.
+
+    Returns:
+        True if running in TUI mode, False otherwise
+    """
+    return _tui_mode
+
+
+def set_tui_app_instance(app_instance: Any) -> None:
+    """Set the global TUI app instance reference.
+
+    Args:
+        app_instance: The TUI app instance
+    """
+    global _tui_app_instance
+    _tui_app_instance = app_instance
+
+
+def get_tui_app_instance() -> Any:
+    """Get the current TUI app instance.
+
+    Returns:
+        The TUI app instance if available, None otherwise
+    """
+    return _tui_app_instance
+
+
+def get_tui_mode() -> bool:
+    """Get the current TUI mode state.
+
+    Returns:
+        True if running in TUI mode, False otherwise
+    """
+    return _tui_mode
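
Usage sketch: with the TUI flags extracted into their own leaf module, callers can check the mode without pulling in state_management and its agent-manager machinery, which is what all the import swaps above are doing:

```python
# Any module can now do this without risking a circular import.
from code_puppy.tui_state import set_tui_mode, is_tui_mode

set_tui_mode(True)
assert is_tui_mode()
```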
code_puppy-0.0.173.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: code-puppy
-Version: 0.0.172
+Version: 0.0.173
 Summary: Code generation agent
 Project-URL: repository, https://github.com/mpfaffenberger/code_puppy
 Project-URL: HomePage, https://github.com/mpfaffenberger/code_puppy
code_puppy-0.0.173.dist-info/RECORD CHANGED
@@ -1,18 +1,19 @@
 code_puppy/__init__.py,sha256=ehbM1-wMjNmOXk_DBhhJECFyBv2dRHwwo7ucjHeM68E,107
 code_puppy/__main__.py,sha256=pDVssJOWP8A83iFkxMLY9YteHYat0EyWDQqMkKHpWp4,203
-code_puppy/agent.py,sha256=G0eSd7RLFw0A1Jf6ZJFgtnzMD9PX9HA-QuKmbtHs4v8,8403
+code_puppy/agent.py,sha256=rBe7OS_ekZvMuteLuIp9lor9zIP7KHBsxZ22pWM9_fg,8526
 code_puppy/callbacks.py,sha256=6wYB6K_fGSCkKKEFaYOYkJT45WaV5W_NhUIzcvVH_nU,5060
-code_puppy/config.py,sha256=g0X-X_Wieo94k0hmwSXqpB5nPIif1H5p7Zw47A7aDmo,20651
+code_puppy/config.py,sha256=A3l5gSJGz9kGXJb2pZVyoksDGclIhMdKmAv-EdDSleY,20625
 code_puppy/http_utils.py,sha256=jnElpVNNIK-_gcqwsBYRImDGU-tYNfsLR7Gtme5CYzs,8423
-code_puppy/main.py,sha256=tYLfhUjPTJ-4S1r-pr-jSbn6kIU1iYvt2Z8lxI7zDFY,22220
-code_puppy/message_history_processor.py,sha256=TsmH4L4srbk0JStaVIcLFoFqm6DVkj1yW3yyY2X_RqY,17823
+code_puppy/main.py,sha256=VlH8fXPapXblSagOjrezeSTtFij_-X3dzq1cfHPXwDI,22252
+code_puppy/message_history_processor.py,sha256=SnVpUCMkbtM4RX5b6ioeG-fh--ri0bh8_yHp7ZQIAPE,16005
 code_puppy/model_factory.py,sha256=z9vQbcGllgMwU0On8rPvzYxkygW2Uyd3NJmRzbKv-is,13759
 code_puppy/models.json,sha256=iXmLZGflnQcu2DRh4WUlgAhoXdvoxUc7KBhB8YxawXM,3088
 code_puppy/reopenable_async_client.py,sha256=4UJRaMp5np8cbef9F0zKQ7TPKOfyf5U-Kv-0zYUWDho,8274
 code_puppy/round_robin_model.py,sha256=SEN3VSwTgC5wHjx2sZsHQLPWOycf4jGwzB-EydgqkdY,5643
-code_puppy/state_management.py,sha256=wAK8J63DHta0FQg89dn_FihW60aNU-wZI9cYv5XsfH0,4999
+code_puppy/state_management.py,sha256=OyBSfdicKN7ObZEGs6Kbyy1JC8R8n4FZBOLIBvQZTBc,1916
 code_puppy/status_display.py,sha256=F6eEAkGePDp4StM2BWj-uLLQTDGtJrf0IufzCeP1rRg,8336
 code_puppy/summarization_agent.py,sha256=kos4_YK-l_YjYRq4Fs4X5YoTUbmAcDhhPqefL-rdenI,3197
+code_puppy/tui_state.py,sha256=TT76XBVapKj6fKjFzz6oxCONeN_BZwcMILxxZcxu6-Y,1171
 code_puppy/version_checker.py,sha256=bjLDmgGPrl7XnYwX1u13O8uFlsfikV90PK6nbA9Z9QU,1150
 code_puppy/agents/__init__.py,sha256=SwtHGNG1GIgDBv7y3EGIXOXEWrG_Ou7fEknNgFbrHv8,594
 code_puppy/agents/agent_code_puppy.py,sha256=sbuQxLzlkMbPOyLbILbXOAHecRsxbFdQt13HJ_GEqQo,7972
@@ -20,7 +21,7 @@ code_puppy/agents/agent_creator_agent.py,sha256=eNOlJssQdyoQm1F7d-TZWcMXpkYmZ-w9
 code_puppy/agents/agent_manager.py,sha256=nXvro6fpX8KA-NedRoVJuhJW966trrePOrH4eAnqq40,17034
 code_puppy/agents/agent_orchestrator.json,sha256=Iqnc0p6ICoAlUTMkEsi1XXMXJi4pdxVnWZUMaih6s5o,1267
 code_puppy/agents/agent_qa_kitten.py,sha256=5PeFFSwCFlTUvP6h5bGntx0xv5NmRwBiw0HnMqY8nLI,9107
-code_puppy/agents/base_agent.py,sha256=mll5s4Lsc6V-YcBJJT-oyXC50LZBGeR7xAJLBk62Zf8,3769
+code_puppy/agents/base_agent.py,sha256=XUsziIf0ZGCe6ZLjt-kJqbeYTy9gTQE3_BPq7THQ3X8,19538
 code_puppy/agents/json_agent.py,sha256=y6AYE3Fx9LhmemcPzt46d7359MNnkGIjU83YBGNer2g,4533
 code_puppy/agents/runtime_manager.py,sha256=fUOBpmETo3wTyLc5wWBfGKSX1HFRQWSpuwxYAOyA-_8,10059
 code_puppy/command_line/__init__.py,sha256=y7WeRemfYppk8KVbCGeAIiTuiOszIURCDjOMZv_YRmU,45
@@ -33,11 +34,11 @@ code_puppy/command_line/motd.py,sha256=PEdkp3ZnydVfvd7mNJylm8YyFNUKg9jmY6uwkA1em
 code_puppy/command_line/prompt_toolkit_completion.py,sha256=PLb1Yt_65iQI2mzPEPxPsSaRww5Ur951Qn-K-TLMCGQ,10006
 code_puppy/command_line/utils.py,sha256=7eyxDHjPjPB9wGDJQQcXV_zOsGdYsFgI0SGCetVmTqE,1251
 code_puppy/command_line/mcp/__init__.py,sha256=0-OQuwjq_pLiTVJ1_NrirVwdRerghyKs_MTZkwPC7YY,315
-code_puppy/command_line/mcp/add_command.py,sha256=f65ZVb_LF4y9KjAms66bPxGrxUW6EeEJCT4HrZXdkqY,6416
+code_puppy/command_line/mcp/add_command.py,sha256=lZ09RpFDIeghX1zhc2YIAqBASs5Ra52x5YAasUKvqJg,6409
 code_puppy/command_line/mcp/base.py,sha256=0uessGekNM37X3tLrKImkhZpgxjAoR7fqskbmkBjcf8,890
 code_puppy/command_line/mcp/handler.py,sha256=ZPWNfJEwGurNQh4KUCRRRsHXuB0kHlaGG4oLN1sFhBg,4412
 code_puppy/command_line/mcp/help_command.py,sha256=Z55-ObpUQFMdqMWAkSJkRi_v2uZhDFVxg6DcIdjVY6Q,5250
-code_puppy/command_line/mcp/install_command.py,sha256=Smp7nCDTwXplltQi2OPlUIwWdFvNEMcg-6h62EI5AhM,8827
+code_puppy/command_line/mcp/install_command.py,sha256=kNASwQ-CUL_5zBTzJhBy_G2ClI6kRzgLJx7EeQX4TmE,8820
 code_puppy/command_line/mcp/list_command.py,sha256=tjxCMmK6yrmjM5L-lFY_qdL_dKTPdBZ_q0bEoAmBADg,3179
 code_puppy/command_line/mcp/logs_command.py,sha256=x_QsVGPpI5XY7RCZtiNFegc6R3csiwF_IEB_Rh2575w,4453
 code_puppy/command_line/mcp/remove_command.py,sha256=MrWmXQ9jZTq1wrohFDO3ls0a1faTHCqRZocN-ynTzh8,2753
@@ -69,7 +70,7 @@ code_puppy/mcp_/status_tracker.py,sha256=uekxrzkzIWrv3OfSVgblaPuoGFcAh_dBYwCcaHZ
 code_puppy/mcp_/system_tools.py,sha256=7_oR8k0c8YjtCcYF9g7A946oAGuKOf_i-92aJH7VmlQ,7331
 code_puppy/mcp_/examples/retry_example.py,sha256=VVdSr7Jq7PPS7AVU1Ev5LnUZe2uBpdZYG7oJSo05TKM,7221
 code_puppy/messaging/__init__.py,sha256=h2eZ7nJblKF71_dNUIBj3vL5RDw7WGy8nh6T_EYVrcA,1176
-code_puppy/messaging/message_queue.py,sha256=CDVpstLee_RbCBeJWv2eON3c3qhlEISf2i9aSXJTLU4,12600
+code_puppy/messaging/message_queue.py,sha256=A6_CVn55tA5cINrIML9N_fS6fGQP-n8gC8abnkwF034,12572
 code_puppy/messaging/queue_console.py,sha256=hf32bKfAOdAaxYuARnmDuWhq4ET77xMWDvu5_T2JggY,10912
 code_puppy/messaging/renderers.py,sha256=9VOpVmu7emyyg1CXgm17u4IzMNcLHvueBl7G14pLQho,16123
 code_puppy/messaging/spinner/__init__.py,sha256=9mkXPYojafydBOAMh9ZUrB4X6uH5Iqz_-E-Obpd72ko,1365
@@ -87,7 +88,7 @@ code_puppy/tools/browser_screenshot.py,sha256=QSwxS37G4LSo-Q9SBiuIofxWKnyInM90TY
 code_puppy/tools/browser_scripts.py,sha256=BLSx1Q2F_mOOoGCoyXat3HvazTb1XaFYPXAF8CYVeX8,15071
 code_puppy/tools/browser_workflows.py,sha256=4u4u59arpY65hdcDMvJGpT02vks0ufnXNJVujzKe_dg,6430
 code_puppy/tools/camoufox_manager.py,sha256=bYnwyOETGDe_h8Q3kXH7w6kFr-OCpMi_Zuh4y11p__E,5097
-code_puppy/tools/command_runner.py,sha256=sum09fxhHtQ0-8xBpVYyVGbQyLjscGQh0fHNYsoU09E,22424
+code_puppy/tools/command_runner.py,sha256=5H4wK-v3UQ713_0JRefpAwyxGBWD9R5yLptR6BhZyIY,22417
 code_puppy/tools/common.py,sha256=pL-9xcRs3rxU7Fl9X9EUgbDp2-csh2LLJ5DHH_KAHKY,10596
 code_puppy/tools/file_modifications.py,sha256=EaDWcv6gi8wAvpgyeJdKSKPWg9fTpZoEkxQiLCE6rn4,23218
 code_puppy/tools/file_operations.py,sha256=3RX-eqhIukle3KA-QTEaiTMIefOWy_JhmPQaijEAt6U,32481
@@ -103,7 +104,7 @@ code_puppy/tools/browser/browser_workflows.py,sha256=HZ0lPmEyAobPIWR-SK1E0ngW1Of
 code_puppy/tools/browser/camoufox_manager.py,sha256=E1CJwQtzPFoDQiXSHHk28niWuJhk3soZ-ItxXCXWO0M,6938
 code_puppy/tools/browser/vqa_agent.py,sha256=u3EhZQHNBiWUn-XLDK9MnVyd2ChshPnVZbB9iEkEqH4,2118
 code_puppy/tui/__init__.py,sha256=XesAxIn32zLPOmvpR2wIDxDAnnJr81a5pBJB4cZp1Xs,321
-code_puppy/tui/app.py,sha256=6yNOC7_93WjfwUxI6LBLmhTZl1FAXO03ErsSq5HxBVs,39434
+code_puppy/tui/app.py,sha256=WW-O8myM_yl05NIdj_aSp7zN56AwimgdMA9TmEOE54Y,39427
 code_puppy/tui/messages.py,sha256=zQoToWI0eWdT36NEsY6RdCFzcDfAmfvoPlHv8jiCbgo,720
 code_puppy/tui/components/__init__.py,sha256=uj5pnk3s6SEN3SbFI0ZnzaA2KK1NNg8TfUj6U-Z732U,455
 code_puppy/tui/components/chat_view.py,sha256=NfyNXuN2idPht1rKJB4YhHVXb1AIRNO5q_nLdt8Ocug,19913
@@ -123,9 +124,9 @@ code_puppy/tui/screens/help.py,sha256=eJuPaOOCp7ZSUlecearqsuX6caxWv7NQszUh0tZJjB
 code_puppy/tui/screens/mcp_install_wizard.py,sha256=vObpQwLbXjQsxmSg-WCasoev1usEi0pollKnL0SHu9U,27693
 code_puppy/tui/screens/settings.py,sha256=GMpv-qa08rorAE9mj3AjmqjZFPhmeJ_GWd-DBHG6iAA,10671
 code_puppy/tui/screens/tools.py,sha256=3pr2Xkpa9Js6Yhf1A3_wQVRzFOui-KDB82LwrsdBtyk,1715
-code_puppy-0.0.172.data/data/code_puppy/models.json,sha256=iXmLZGflnQcu2DRh4WUlgAhoXdvoxUc7KBhB8YxawXM,3088
-code_puppy-0.0.172.dist-info/METADATA,sha256=VjpMGSIgA_Trvm70on6SEy3_9eFjgsOoakkVueRMWvA,20106
-code_puppy-0.0.172.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-code_puppy-0.0.172.dist-info/entry_points.txt,sha256=Tp4eQC99WY3HOKd3sdvb22vZODRq0XkZVNpXOag_KdI,91
-code_puppy-0.0.172.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
-code_puppy-0.0.172.dist-info/RECORD,,
+code_puppy-0.0.173.data/data/code_puppy/models.json,sha256=iXmLZGflnQcu2DRh4WUlgAhoXdvoxUc7KBhB8YxawXM,3088
+code_puppy-0.0.173.dist-info/METADATA,sha256=4qImoiksA3Y_7gc-YlHc7cAdJJa6pnP2xhOUQbQ9vCY,20106
+code_puppy-0.0.173.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+code_puppy-0.0.173.dist-info/entry_points.txt,sha256=Tp4eQC99WY3HOKd3sdvb22vZODRq0XkZVNpXOag_KdI,91
+code_puppy-0.0.173.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
+code_puppy-0.0.173.dist-info/RECORD,,