code-puppy 0.0.176__py3-none-any.whl → 0.0.178__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/agents/__init__.py +2 -2
- code_puppy/agents/agent_code_puppy.py +2 -1
- code_puppy/agents/agent_creator_agent.py +3 -2
- code_puppy/agents/agent_golang_reviewer.py +61 -0
- code_puppy/agents/agent_manager.py +5 -5
- code_puppy/agents/base_agent.py +60 -40
- code_puppy/command_line/command_handler.py +19 -10
- code_puppy/command_line/mcp/start_all_command.py +1 -1
- code_puppy/command_line/mcp/start_command.py +0 -1
- code_puppy/command_line/mcp/stop_all_command.py +1 -1
- code_puppy/command_line/mcp/stop_command.py +1 -0
- code_puppy/config.py +5 -3
- code_puppy/main.py +5 -2
- code_puppy/mcp_/examples/retry_example.py +4 -1
- code_puppy/messaging/spinner/console_spinner.py +1 -1
- code_puppy/model_factory.py +1 -1
- code_puppy/round_robin_model.py +2 -4
- code_puppy/tools/agent_tools.py +10 -8
- code_puppy/tools/browser/browser_screenshot.py +4 -3
- code_puppy/tools/browser/browser_scripts.py +0 -6
- code_puppy/tools/browser/browser_workflows.py +28 -20
- code_puppy/tools/browser/camoufox_manager.py +10 -9
- code_puppy/tools/browser_scripts.py +0 -6
- code_puppy/tools/browser_workflows.py +28 -20
- code_puppy/tools/command_runner.py +1 -1
- code_puppy/tui/app.py +3 -13
- code_puppy/tui/components/chat_view.py +1 -0
- code_puppy/tui/screens/settings.py +3 -3
- {code_puppy-0.0.176.dist-info → code_puppy-0.0.178.dist-info}/METADATA +10 -10
- {code_puppy-0.0.176.dist-info → code_puppy-0.0.178.dist-info}/RECORD +34 -33
- {code_puppy-0.0.176.data → code_puppy-0.0.178.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.176.dist-info → code_puppy-0.0.178.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.176.dist-info → code_puppy-0.0.178.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.176.dist-info → code_puppy-0.0.178.dist-info}/licenses/LICENSE +0 -0
code_puppy/agents/__init__.py
CHANGED
@@ -5,12 +5,12 @@ configurations, each with their own system prompts and tool sets.
 """

 from .agent_manager import (
+    get_agent_descriptions,
     get_available_agents,
     get_current_agent,
-    set_current_agent,
     load_agent,
-    get_agent_descriptions,
     refresh_agents,
+    set_current_agent,
 )

 __all__ = [
code_puppy/agents/agent_creator_agent.py
CHANGED
@@ -4,11 +4,12 @@ import json
 import os
 from typing import Dict, List, Optional

-from .base_agent import BaseAgent
 from code_puppy.config import get_user_agents_directory
 from code_puppy.model_factory import ModelFactory
 from code_puppy.tools import get_available_tool_names

+from .base_agent import BaseAgent
+

 class AgentCreatorAgent(BaseAgent):
     """Specialized agent for creating JSON agent configurations."""
@@ -132,7 +133,7 @@ Users can optionally pin a specific model to their agent to override the global

 Whenever you create agents, you should always replicate these detailed tool descriptions and examples in their system prompts. This ensures consistency and proper tool usage across all agents.
 - Side note - these tool definitions are also available to you! So use them!
-
+
 ### File Operations Documentation:

 #### `list_files(directory=".", recursive=True)`
code_puppy/agents/agent_golang_reviewer.py
ADDED
@@ -0,0 +1,61 @@
+"""Golang code reviewer agent."""
+
+from .base_agent import BaseAgent
+
+
+class GolangReviewerAgent(BaseAgent):
+    """Golang-focused code reviewer agent."""
+
+    @property
+    def name(self) -> str:
+        return "golang-reviewer"
+
+    @property
+    def display_name(self) -> str:
+        return "Golang Reviewer 🦴"
+
+    @property
+    def description(self) -> str:
+        return "Meticulous reviewer for Go pull requests with idiomatic guidance"
+
+    def get_available_tools(self) -> list[str]:
+        """Reviewers only need read and reasoning helpers."""
+        return [
+            "agent_share_your_reasoning",
+            "list_files",
+            "read_file",
+            "grep",
+        ]
+
+    def get_system_prompt(self) -> str:
+        return """
+You are an expert Golang reviewer puppy. Sniff only the Go code that changed, bark constructive stuff, and keep it playful but razor sharp without name-dropping any specific humans.
+
+Mission profile:
+- Review only tracked `.go` files with real code diffs. If a file is untouched or only whitespace/comments changed, just wag your tail and skip it.
+- Ignore every non-Go file: `.yml`, `.yaml`, `.md`, `.json`, `.txt`, `Dockerfile`, `LICENSE`, `README.md`, etc. If someone tries to sneak one in, roll over and move on.
+- Live by `Effective Go` (https://go.dev/doc/effective_go) and the `Google Go Style Guide` (https://google.github.io/styleguide/go/).
+- Enforce gofmt/goimports cleanliness, make sure go vet and staticcheck would be happy, and flag any missing `//nolint` justifications.
+- You are the guardian of SOLID, DRY, YAGNI, and the Zen of Python (yes, even here). Call out violations with precision.
+
+Per Go file that actually matters:
+1. Give a breezy high-level summary of what changed. No snooze-fests or line-by-line bedtime stories.
+2. Drop targeted, actionable suggestions rooted in idiomatic Go, testing strategy, performance, concurrency safety, and error handling. No fluff or nitpicks unless they break principles.
+3. Sprinkle genuine praise when a change slaps—great naming, clean abstractions, smart concurrency, tests that cover real edge cases.
+
+Review etiquette:
+- Stay concise, organized, and focused on impact. Group similar findings so the reader doesn’t chase their tail.
+- Flag missing tests or weak coverage when it matters. Suggest concrete test names or scenarios.
+- Prefer positive phrasing: "Consider" beats "Don’t". We’re a nice puppy, just ridiculously picky.
+- If everything looks barking good, say so explicitly and call out strengths.
+- Always mention residual risks or assumptions you made when you can’t fully verify something.
+
+Output format (per file with real changes):
+- File header like `file.go:123` when referencing issues. Avoid line ranges.
+- Use bullet points for findings and kudos. Severity order: blockers first, then warnings, then nits, then praise.
+- Close with overall verdict if multiple files: "Ship it", "Needs fixes", or "Mixed bag", plus a short rationale.
+
+You are the Golang review persona for this CLI pack. Be sassy, precise, and wildly helpful.
+- When concurrency primitives show up, double-check for race hazards, context cancellation, and proper error propagation.
+- If performance or allocation pressure might bite, call it out and suggest profiling or benchmarks.
+"""
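For orientation, here is a minimal usage sketch (not part of the diff) showing what this new agent class exposes once instantiated. It assumes only the properties and tool list defined in the hunk above and the module path from the file listing.

```python
# Hypothetical illustration only: exercising the GolangReviewerAgent added in 0.0.178.
from code_puppy.agents.agent_golang_reviewer import GolangReviewerAgent

agent = GolangReviewerAgent()
print(agent.name)                      # "golang-reviewer"
print(agent.display_name)              # "Golang Reviewer 🦴"
print(agent.get_available_tools())     # read-only helpers: list_files, read_file, grep, ...
print(agent.get_system_prompt()[:60])  # first characters of the reviewer persona prompt
```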
code_puppy/agents/agent_manager.py
CHANGED
@@ -6,13 +6,14 @@ import os
 import pkgutil
 import uuid
 from pathlib import Path
+from typing import Dict, List, Optional, Type, Union
+
 from pydantic_ai.messages import ModelMessage
-from typing import Dict, Optional, Type, Union, List

-from code_puppy.callbacks import on_agent_reload
-from code_puppy.messaging import emit_warning
 from code_puppy.agents.base_agent import BaseAgent
 from code_puppy.agents.json_agent import JSONAgent, discover_json_agents
+from code_puppy.callbacks import on_agent_reload
+from code_puppy.messaging import emit_warning

 # Registry of available agents (Python classes and JSON file paths)
 _AGENT_REGISTRY: Dict[str, Union[Type[BaseAgent], str]] = {}
@@ -148,7 +149,6 @@ def _ensure_session_cache_loaded() -> None:
     _SESSION_FILE_LOADED = True


-
 def _discover_agents(message_group_id: Optional[str] = None):
     """Dynamically discover all agent classes and JSON agents."""
     # Always clear the registry to force refresh
@@ -251,7 +251,7 @@ def set_current_agent(agent_name: str) -> bool:
     """
     global _CURRENT_AGENT
     curr_agent = get_current_agent()
-    if curr_agent:
+    if curr_agent is not None:
         _AGENT_HISTORIES[curr_agent.name] = curr_agent.get_message_history()
     # Generate a message group ID for agent switching
     message_group_id = str(uuid.uuid4())
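The `set_current_agent` fix above guards the history stash with an explicit `None` check before switching agents. A minimal standalone sketch of that bookkeeping pattern, with `_AGENT_HISTORIES` and `get_message_history()` taken from the hunk and everything else illustrative scaffolding:

```python
from typing import Any, Dict, List, Optional

# Per-agent conversation stash, keyed by agent name (as in agent_manager.py).
_AGENT_HISTORIES: Dict[str, List[Any]] = {}


def stash_history(curr_agent: Optional[Any]) -> None:
    # `is not None` only skips the "no agent selected yet" case, whereas a bare
    # truthiness check would also skip agents that happen to evaluate falsy.
    if curr_agent is not None:
        _AGENT_HISTORIES[curr_agent.name] = curr_agent.get_message_history()
```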
code_puppy/agents/base_agent.py
CHANGED
@@ -1,18 +1,18 @@
 """Base agent configuration class for defining agent properties."""
-import math
-
-import mcp
-import signal

 import asyncio
-
 import json
+import math
+import signal
 import uuid
 from abc import ABC, abstractmethod
-from pydantic_ai import UsageLimitExceeded
 from typing import Any, Dict, List, Optional, Set, Tuple, Union

+import mcp
 import pydantic
+import pydantic_ai.models
+from pydantic_ai import Agent as PydanticAgent
+from pydantic_ai import RunContext, UsageLimitExceeded
 from pydantic_ai.messages import (
     ModelMessage,
     ModelRequest,
@@ -22,32 +22,34 @@ from pydantic_ai.messages import (
     ToolReturn,
     ToolReturnPart,
 )
-
-from pydantic_ai.settings import ModelSettings
 from pydantic_ai.models.openai import OpenAIModelSettings
-from pydantic_ai import
+from pydantic_ai.settings import ModelSettings

 # Consolidated relative imports
 from code_puppy.config import (
     get_agent_pinned_model,
     get_compaction_strategy,
     get_compaction_threshold,
-    get_message_limit,
     get_global_model_name,
     get_protected_token_count,
     get_value,
     load_mcp_server_configs,
 )
-from code_puppy.
+from code_puppy.mcp_ import ServerConfig, get_mcp_manager
+from code_puppy.messaging import (
+    emit_error,
+    emit_info,
+    emit_system_message,
+    emit_warning,
+)
 from code_puppy.model_factory import ModelFactory
 from code_puppy.summarization_agent import run_summarization_sync
-from code_puppy.mcp_ import ServerConfig, get_mcp_manager
 from code_puppy.tools.common import console


 class BaseAgent(ABC):
     """Base class for all agent configurations."""
-
+
     def __init__(self):
         self.id = str(uuid.uuid4())
         self._message_history: List[Any] = []
@@ -57,6 +59,7 @@ class BaseAgent(ABC):
         self._last_model_name: Optional[str] = None
         # Puppy rules loaded lazily
         self._puppy_rules: Optional[str] = None
+        self.cur_model: pydantic_ai.models.Model

     @property
     @abstractmethod
@@ -164,7 +167,7 @@ class BaseAgent(ABC):
         """Get pinned model name for this agent, if specified.

         Returns:
-            Model name to use for this agent, or
+            Model name to use for this agent, or global default if none pinned.
         """
         pinned = get_agent_pinned_model(self.name)
         if pinned == "" or pinned is None:
@@ -199,7 +202,9 @@ class BaseAgent(ABC):
         elif isinstance(content, str):
             attributes.append(f"content={content}")
         elif isinstance(content, pydantic.BaseModel):
-            attributes.append(f"content={json.dumps(content.model_dump(), sort_keys=True)}")
+            attributes.append(
+                f"content={json.dumps(content.model_dump(), sort_keys=True)}"
+            )
         elif isinstance(content, dict):
             attributes.append(f"content={json.dumps(content, sort_keys=True)}")
         else:
@@ -217,7 +222,9 @@ class BaseAgent(ABC):
         if instructions:
             header_bits.append(f"instructions={instructions}")

-        part_strings = [self._stringify_part(part) for part in getattr(message, "parts", [])]
+        part_strings = [
+            self._stringify_part(part) for part in getattr(message, "parts", [])
+        ]
         canonical = "||".join(header_bits + part_strings)
         return hash(canonical)

@@ -262,15 +269,14 @@ class BaseAgent(ABC):

     def estimate_token_count(self, text: str) -> int:
         """
-        Simple token estimation using len(message)
+        Simple token estimation using len(message) / 3.
         This replaces tiktoken with a much simpler approach.
         """
-        return max(1, math.floor((len(text) /
-
+        return max(1, math.floor((len(text) / 3)))

     def estimate_tokens_for_message(self, message: ModelMessage) -> int:
         """
-        Estimate the number of tokens in a message using len(message)
+        Estimate the number of tokens in a message using len(message)
         Simple and fast replacement for tiktoken.
         """
         total_tokens = 0
@@ -348,7 +354,9 @@ class BaseAgent(ABC):
         protected_token_count = system_tokens  # Start with system message tokens

         # Go backwards through non-system messages to find protected zone
-        for i in range(len(messages) - 1, 0, -1):  # Stop at 1, not 0 (skip system message)
+        for i in range(
+            len(messages) - 1, 0, -1
+        ):  # Stop at 1, not 0 (skip system message)
             message = messages[i]
             message_tokens = self.estimate_tokens_for_message(message)

@@ -378,9 +386,7 @@ class BaseAgent(ABC):
         return messages_to_summarize, protected_messages

     def summarize_messages(
-        self,
-        messages: List[ModelMessage],
-        with_protection: bool = True
+        self, messages: List[ModelMessage], with_protection: bool = True
     ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
         """
         Summarize messages while protecting recent messages up to PROTECTED_TOKENS.
@@ -435,7 +441,9 @@ class BaseAgent(ABC):
         compacted: List[ModelMessage] = [system_message] + list(new_messages)

         # Drop the system message from protected_messages because we already included it
-        protected_tail = [msg for msg in protected_messages if msg is not system_message]
+        protected_tail = [
+            msg for msg in protected_messages if msg is not system_message
+        ]

         compacted.extend(protected_tail)

@@ -457,7 +465,9 @@ class BaseAgent(ABC):

         return int(context_length)

-    def prune_interrupted_tool_calls(self, messages: List[ModelMessage]) -> List[ModelMessage]:
+    def prune_interrupted_tool_calls(
+        self, messages: List[ModelMessage]
+    ) -> List[ModelMessage]:
         """
         Remove any messages that participate in mismatched tool call sequences.

@@ -503,12 +513,15 @@ class BaseAgent(ABC):
             pruned.append(msg)
         return pruned

-    def message_history_processor(
+    def message_history_processor(
+        self, ctx: RunContext, messages: List[ModelMessage]
+    ) -> List[ModelMessage]:
         # First, prune any interrupted/mismatched tool-call conversations
-        total_current_tokens = sum(self.estimate_tokens_for_message(msg) for msg in messages)
-
         model_max = self.get_model_context_length()

+        total_current_tokens = sum(
+            self.estimate_tokens_for_message(msg) for msg in messages
+        )
         proportion_used = total_current_tokens / model_max

         # Check if we're in TUI mode and can update the status bar
@@ -591,7 +604,9 @@ class BaseAgent(ABC):
             return result_messages
         return messages

-    def truncation(self, messages: List[ModelMessage], protected_tokens: int) -> List[ModelMessage]:
+    def truncation(
+        self, messages: List[ModelMessage], protected_tokens: int
+    ) -> List[ModelMessage]:
         """
         Truncate message history to manage token usage.

@@ -648,6 +663,7 @@ class BaseAgent(ABC):
         if self._puppy_rules is not None:
             return self._puppy_rules
         from pathlib import Path
+
         possible_paths = ["AGENTS.md", "AGENT.md", "agents.md", "agent.md"]
         for path_str in possible_paths:
             puppy_rules_path = Path(path_str)
@@ -659,7 +675,6 @@ class BaseAgent(ABC):

     def load_mcp_servers(self, extra_headers: Optional[Dict[str, str]] = None):
         """Load MCP servers through the manager and return pydantic-ai compatible servers."""
-

         mcp_disabled = get_value("disable_mcp_servers")
         if mcp_disabled and str(mcp_disabled).lower() in ("1", "true", "yes", "on"):
@@ -690,7 +705,9 @@ class BaseAgent(ABC):
                 else:
                     if existing.config != server_config.config:
                         manager.update_server(existing.id, server_config)
-                        emit_system_message(f"[dim]Updated MCP server: {name}[/dim]")
+                        emit_system_message(
+                            f"[dim]Updated MCP server: {name}[/dim]"
+                        )
             except Exception as e:
                 emit_error(f"Failed to register MCP server '{name}': {str(e)}")
                 continue
@@ -715,6 +732,7 @@ class BaseAgent(ABC):
     def reload_code_generation_agent(self, message_group: Optional[str] = None):
         """Force-reload the pydantic-ai Agent based on current config and model."""
         from code_puppy.tools import register_tools_for_agent
+
         if message_group is None:
             message_group = str(uuid.uuid4())

@@ -753,6 +771,7 @@ class BaseAgent(ABC):
             model_settings_dict["extra_body"] = {"verbosity": "low"}
         model_settings = OpenAIModelSettings(**model_settings_dict)

+        self.cur_model = model
         p_agent = PydanticAgent(
             model=model,
             instructions=instructions,
@@ -772,8 +791,7 @@ class BaseAgent(ABC):
         self.pydantic_agent = p_agent
         return self._code_generation_agent

-
-    def message_history_accumulator(self, messages: List[Any]):
+    def message_history_accumulator(self, ctx: RunContext, messages: List[Any]):
         _message_history = self.get_message_history()
         message_history_hashes = set([self.hash_message(m) for m in _message_history])
         for msg in messages:
@@ -785,13 +803,10 @@ class BaseAgent(ABC):

         # Apply message history trimming using the main processor
         # This ensures we maintain global state while still managing context limits
-        self.message_history_processor(_message_history)
+        self.message_history_processor(ctx, _message_history)
         return self.get_message_history()

-
-    async def run_with_mcp(
-        self, prompt: str, usage_limits = None, **kwargs
-    ) -> Any:
+    async def run_with_mcp(self, prompt: str, usage_limits=None, **kwargs) -> Any:
         """
         Run the agent with MCP servers and full cancellation support.

@@ -814,7 +829,12 @@ class BaseAgent(ABC):

         async def run_agent_task():
             try:
-                result_ = await pydantic_agent.run(prompt, message_history=self.get_message_history(), usage_limits=usage_limits, **kwargs)
+                result_ = await pydantic_agent.run(
+                    prompt,
+                    message_history=self.get_message_history(),
+                    usage_limits=usage_limits,
+                    **kwargs,
+                )
                 self.set_message_history(
                     self.prune_interrupted_tool_calls(self.get_message_history())
                 )
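The `estimate_token_count` change above pins down the heuristic: roughly one token per three characters of text, with a floor of one. A standalone sketch of that arithmetic and of the proportion-of-context check that drives compaction; the 128k context length below is an illustrative assumption, not a value from the diff:

```python
import math


def estimate_token_count(text: str) -> int:
    # Heuristic from the diff: ~1 token per 3 characters, never less than 1.
    return max(1, math.floor(len(text) / 3))


# How the share of the context window is computed before compaction kicks in.
history = ["hello puppy", "please review my Go code", "x" * 3000]
total_tokens = sum(estimate_token_count(text) for text in history)
model_max = 128_000  # assumed context length, purely for illustration
proportion_used = total_tokens / model_max
print(total_tokens, f"{proportion_used:.2%}")  # 1011 0.79%
```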
code_puppy/command_line/command_handler.py
CHANGED
@@ -1,6 +1,5 @@
 import os

-from code_puppy.agents import get_current_agent
 from code_puppy.command_line.model_picker_completion import update_model_in_input
 from code_puppy.command_line.motd import print_motd
 from code_puppy.command_line.utils import make_directory_table
@@ -120,10 +119,9 @@ def handle_command(command: str):
         return True

     if command.strip().startswith("/compact"):
-        from code_puppy.config import get_compaction_strategy
         # Functions have been moved to BaseAgent class
         from code_puppy.agents.agent_manager import get_current_agent
-        from code_puppy.config import get_protected_token_count
+        from code_puppy.config import get_compaction_strategy, get_protected_token_count
         from code_puppy.messaging import (
             emit_error,
             emit_info,
@@ -139,7 +137,9 @@ def handle_command(command: str):
             return True

         current_agent = get_current_agent()
-        before_tokens = sum(current_agent.estimate_tokens_for_message(m) for m in history)
+        before_tokens = sum(
+            current_agent.estimate_tokens_for_message(m) for m in history
+        )
         compaction_strategy = get_compaction_strategy()
         protected_tokens = get_protected_token_count()
         emit_info(
@@ -163,7 +163,9 @@ def handle_command(command: str):
         agent.set_message_history(compacted)

         current_agent = get_current_agent()
-        after_tokens = sum(current_agent.estimate_tokens_for_message(m) for m in compacted)
+        after_tokens = sum(
+            current_agent.estimate_tokens_for_message(m) for m in compacted
+        )
         reduction_pct = (
             ((before_tokens - after_tokens) / before_tokens * 100)
             if before_tokens > 0
@@ -424,6 +426,7 @@ def handle_command(command: str):

         # Get built-in agents
         from code_puppy.agents.agent_manager import get_agent_descriptions
+
         builtin_agents = get_agent_descriptions()

         emit_info("Available models:")
@@ -456,6 +459,7 @@ def handle_command(command: str):

         # Get list of available built-in agents
         from code_puppy.agents.agent_manager import get_agent_descriptions
+
         builtin_agents = get_agent_descriptions()

         is_json_agent = agent_name in json_agents
@@ -495,6 +499,7 @@ def handle_command(command: str):
         else:
             # Handle built-in Python agent - store in config
             from code_puppy.config import set_agent_pinned_model
+
             set_agent_pinned_model(agent_name, model_name)

         emit_success(f"Model '{model_name}' pinned to agent '{agent_name}'")
@@ -504,7 +509,6 @@ def handle_command(command: str):

         current_agent = get_current_agent()
         if current_agent.name == agent_name:
-
             emit_info(f"Active agent reloaded with pinned model '{model_name}'")

         return True
@@ -554,9 +558,9 @@ def handle_command(command: str):
         from datetime import datetime
         from pathlib import Path

-        from code_puppy.config import CONFIG_DIR
         # estimate_tokens_for_message has been moved to BaseAgent class
         from code_puppy.agents.agent_manager import get_current_agent
+        from code_puppy.config import CONFIG_DIR

         tokens = command.split()
         if len(tokens) != 2:
@@ -588,7 +592,9 @@ def handle_command(command: str):
             "session_name": session_name,
             "timestamp": datetime.now().isoformat(),
             "message_count": len(history),
-            "total_tokens": sum(current_agent.estimate_tokens_for_message(m) for m in history),
+            "total_tokens": sum(
+                current_agent.estimate_tokens_for_message(m) for m in history
+            ),
             "file_path": str(pickle_file),
         }

@@ -609,9 +615,9 @@ def handle_command(command: str):
         import pickle
         from pathlib import Path

-        from code_puppy.config import CONFIG_DIR
         # estimate_tokens_for_message has been moved to BaseAgent class
         from code_puppy.agents.agent_manager import get_current_agent
+        from code_puppy.config import CONFIG_DIR

         tokens = command.split()
         if len(tokens) != 2:
@@ -638,7 +644,9 @@ def handle_command(command: str):
         agent = get_current_agent()
         agent.set_message_history(history)
         current_agent = get_current_agent()
-        total_tokens = sum(current_agent.estimate_tokens_for_message(m) for m in history)
+        total_tokens = sum(
+            current_agent.estimate_tokens_for_message(m) for m in history
+        )

         emit_success(
             f"✅ Context loaded: {len(history)} messages ({total_tokens} tokens)\n"
@@ -652,6 +660,7 @@ def handle_command(command: str):

     if command.startswith("/truncate"):
         from code_puppy.agents.agent_manager import get_current_agent
+
         tokens = command.split()
         if len(tokens) != 2:
             emit_error(
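The `/compact` handler above reports how much the estimated token count shrank. The percentage math it uses, including the divide-by-zero guard visible in the hunk, boils down to the sketch below; the fallback value for an empty history is assumed to be 0 here, since the `else` branch lies outside the hunk.

```python
def reduction_percentage(before_tokens: int, after_tokens: int) -> float:
    # Same shape as the /compact report: percent of estimated tokens removed,
    # guarded so an empty history (before_tokens == 0) reports 0 instead of crashing.
    return (
        ((before_tokens - after_tokens) / before_tokens * 100)
        if before_tokens > 0
        else 0.0
    )


print(reduction_percentage(12_000, 4_500))  # 62.5
print(reduction_percentage(0, 0))           # 0.0
```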
code_puppy/command_line/mcp/start_all_command.py
CHANGED
@@ -9,8 +9,8 @@ from typing import List, Optional
 from code_puppy.mcp_.managed_server import ServerState
 from code_puppy.messaging import emit_info

-from .base import MCPCommandBase
 from ...agents import get_current_agent
+from .base import MCPCommandBase

 # Configure logging
 logger = logging.getLogger(__name__)
code_puppy/command_line/mcp/stop_all_command.py
CHANGED
@@ -9,8 +9,8 @@ from typing import List, Optional
 from code_puppy.mcp_.managed_server import ServerState
 from code_puppy.messaging import emit_info

-from .base import MCPCommandBase
 from ...agents import get_current_agent
+from .base import MCPCommandBase

 # Configure logging
 logger = logging.getLogger(__name__)
code_puppy/config.py
CHANGED
@@ -162,8 +162,6 @@ def load_mcp_server_configs():
         return {}


-
-
 def _default_model_from_models_json():
     """Attempt to load the first model name from models.json.

@@ -298,7 +296,11 @@ def _validate_model_exists(model_name: str) -> bool:

 def clear_model_cache():
     """Clear the model validation cache. Call this when models.json changes."""
-    global _model_validation_cache, _default_model_cache, _default_vision_model_cache, _default_vqa_model_cache
+    global \
+        _model_validation_cache, \
+        _default_model_cache, \
+        _default_vision_model_cache, \
+        _default_vqa_model_cache
     _model_validation_cache.clear()
     _default_model_cache = None
     _default_vision_model_cache = None
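The reflowed `global` statement above is split with backslashes because Python's `global` statement, unlike imports or call arguments, cannot be wrapped in parentheses for line continuation. A minimal standalone sketch of the same cache-clearing pattern, with the cache names trimmed down for illustration:

```python
# Module-level caches, as in config.py (illustrative subset).
_model_validation_cache: dict = {}
_default_model_cache = None


def clear_model_cache() -> None:
    # `global` accepts no parentheses, so long name lists are continued with backslashes.
    global \
        _model_validation_cache, \
        _default_model_cache
    _model_validation_cache.clear()
    _default_model_cache = None
```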
code_puppy/main.py
CHANGED
@@ -24,10 +24,12 @@ from code_puppy.config import (
     save_command_to_history,
 )
 from code_puppy.http_utils import find_available_port
+from code_puppy.tools.common import console
+
 # message_history_accumulator and prune_interrupted_tool_calls have been moved to BaseAgent class
 from code_puppy.tui_state import is_tui_mode, set_tui_mode
-from code_puppy.tools.common import console
 from code_puppy.version_checker import default_version_mismatch_behavior
+
 plugins.load_plugin_callbacks()


@@ -261,8 +263,9 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> None:
     emit_info("[bold cyan]Initializing agent...[/bold cyan]")
     # Initialize the runtime agent manager
     if initial_command:
-        from code_puppy.messaging import emit_info, emit_system_message
         from code_puppy.agents import get_current_agent
+        from code_puppy.messaging import emit_info, emit_system_message
+
         agent = get_current_agent()
         emit_info(
             f"[bold blue]Processing initial command:[/bold blue] {initial_command}"
code_puppy/mcp_/examples/retry_example.py
CHANGED
@@ -17,7 +17,10 @@ from typing import Any
 project_root = Path(__file__).parents[3]
 sys.path.insert(0, str(project_root))

-from code_puppy.mcp_.retry_manager import
+from code_puppy.mcp_.retry_manager import (  # noqa: E402
+    get_retry_manager,
+    retry_mcp_call,
+)

 logger = logging.getLogger(__name__)

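The `# noqa: E402` marker above exists because the example script pushes the project root onto `sys.path` before importing from the package, and flake8/ruff flag any import that does not sit at the top of the module (rule E402). A condensed sketch of that pattern, with only one imported name shown and intended to live at the example script's location in the repo:

```python
import sys
from pathlib import Path

# Make the in-repo package importable before anything from it is imported.
project_root = Path(__file__).parents[3]
sys.path.insert(0, str(project_root))

# The import necessarily comes after the sys.path edit, hence the E402 suppression.
from code_puppy.mcp_.retry_manager import get_retry_manager  # noqa: E402
```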
code_puppy/messaging/spinner/console_spinner.py
CHANGED
@@ -47,7 +47,7 @@ class ConsoleSpinner(SpinnerBase):
         self._live = Live(
             self._generate_spinner_panel(),
             console=self.console,
-            refresh_per_second=
+            refresh_per_second=20,
             transient=True,
             auto_refresh=False,  # Don't auto-refresh to avoid wiping out user input
         )
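For context, the arguments in that call are standard `rich.live.Live` options: a refresh ceiling of 20 frames per second, a transient display that disappears when the context exits, and manual repaints because `auto_refresh=False`. A minimal, self-contained Rich sketch of that configuration; the spinner text and loop below are illustrative only, not code from the package:

```python
import time

from rich.console import Console
from rich.live import Live
from rich.spinner import Spinner

console = Console()
spinner = Spinner("dots", text="Thinking...")

with Live(
    spinner,
    console=console,
    refresh_per_second=20,
    transient=True,
    auto_refresh=False,  # refresh only on demand, as in the diff
) as live:
    for _ in range(40):
        time.sleep(0.05)
        live.refresh()  # manual repaint, roughly 20 times per second
```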
code_puppy/model_factory.py
CHANGED
@@ -11,9 +11,9 @@ from pydantic_ai.models.anthropic import AnthropicModel
 from pydantic_ai.models.gemini import GeminiModel
 from pydantic_ai.models.openai import OpenAIChatModel
 from pydantic_ai.providers.anthropic import AnthropicProvider
+from pydantic_ai.providers.cerebras import CerebrasProvider
 from pydantic_ai.providers.google_gla import GoogleGLAProvider
 from pydantic_ai.providers.openai import OpenAIProvider
-from pydantic_ai.providers.cerebras import CerebrasProvider
 from pydantic_ai.providers.openrouter import OpenRouterProvider

 from . import callbacks