code-puppy 0.0.154__py3-none-any.whl → 0.0.156__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/agent.py +26 -5
- code_puppy/agents/agent_creator_agent.py +65 -13
- code_puppy/agents/json_agent.py +8 -0
- code_puppy/agents/runtime_manager.py +12 -4
- code_puppy/command_line/command_handler.py +83 -0
- code_puppy/command_line/mcp/install_command.py +50 -1
- code_puppy/command_line/mcp/wizard_utils.py +88 -17
- code_puppy/command_line/prompt_toolkit_completion.py +18 -2
- code_puppy/config.py +8 -2
- code_puppy/main.py +17 -4
- code_puppy/mcp/__init__.py +2 -2
- code_puppy/mcp/config_wizard.py +1 -1
- code_puppy/messaging/spinner/console_spinner.py +1 -1
- code_puppy/model_factory.py +13 -12
- code_puppy/models.json +26 -0
- code_puppy/round_robin_model.py +35 -18
- code_puppy/summarization_agent.py +1 -3
- code_puppy/tools/agent_tools.py +41 -138
- code_puppy/tools/file_operations.py +116 -96
- code_puppy/tui/app.py +1 -1
- {code_puppy-0.0.154.data → code_puppy-0.0.156.data}/data/code_puppy/models.json +26 -0
- {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/METADATA +4 -3
- {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/RECORD +26 -48
- code_puppy/token_utils.py +0 -67
- code_puppy/tools/token_check.py +0 -32
- code_puppy/tui/tests/__init__.py +0 -1
- code_puppy/tui/tests/test_agent_command.py +0 -79
- code_puppy/tui/tests/test_chat_message.py +0 -28
- code_puppy/tui/tests/test_chat_view.py +0 -88
- code_puppy/tui/tests/test_command_history.py +0 -89
- code_puppy/tui/tests/test_copy_button.py +0 -191
- code_puppy/tui/tests/test_custom_widgets.py +0 -27
- code_puppy/tui/tests/test_disclaimer.py +0 -27
- code_puppy/tui/tests/test_enums.py +0 -15
- code_puppy/tui/tests/test_file_browser.py +0 -60
- code_puppy/tui/tests/test_help.py +0 -38
- code_puppy/tui/tests/test_history_file_reader.py +0 -107
- code_puppy/tui/tests/test_input_area.py +0 -33
- code_puppy/tui/tests/test_settings.py +0 -44
- code_puppy/tui/tests/test_sidebar.py +0 -33
- code_puppy/tui/tests/test_sidebar_history.py +0 -153
- code_puppy/tui/tests/test_sidebar_history_navigation.py +0 -132
- code_puppy/tui/tests/test_status_bar.py +0 -54
- code_puppy/tui/tests/test_timestamped_history.py +0 -52
- code_puppy/tui/tests/test_tools.py +0 -82
- {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/licenses/LICENSE +0 -0
code_puppy/agent.py
CHANGED
@@ -24,17 +24,17 @@ from code_puppy.tools.common import console
 
 def load_puppy_rules():
     global PUPPY_RULES
-
+
     # Check for all 4 combinations of the rules file
     possible_paths = ["AGENTS.md", "AGENT.md", "agents.md", "agent.md"]
-
+
     for path_str in possible_paths:
         puppy_rules_path = Path(path_str)
         if puppy_rules_path.exists():
            with open(puppy_rules_path, "r") as f:
                puppy_rules = f.read()
                return puppy_rules
-
+
    # If none of the files exist, return None
    return None
 
@@ -134,7 +134,15 @@ def reload_code_generation_agent(message_group: str | None):
     clear_model_cache()
     clear_agent_cache()
 
-
+    # Check if current agent has a pinned model
+    from code_puppy.agents import get_current_agent_config
+    agent_config = get_current_agent_config()
+    agent_model_name = None
+    if hasattr(agent_config, 'get_model_name'):
+        agent_model_name = agent_config.get_model_name()
+
+    # Use agent-specific model if pinned, otherwise use global model
+    model_name = agent_model_name if agent_model_name else get_model_name()
     emit_info(
         f"[bold cyan]Loading Model: {model_name}[/bold cyan]",
         message_group=message_group,
@@ -193,7 +201,19 @@ def get_code_generation_agent(force_reload=False, message_group: str | None = None):
         message_group = str(uuid.uuid4())
     from code_puppy.config import get_model_name
 
-
+    # Get the global model name
+    global_model_name = get_model_name()
+
+    # Check if current agent has a pinned model
+    from code_puppy.agents import get_current_agent_config
+    agent_config = get_current_agent_config()
+    agent_model_name = None
+    if hasattr(agent_config, 'get_model_name'):
+        agent_model_name = agent_config.get_model_name()
+
+    # Use agent-specific model if pinned, otherwise use global model
+    model_name = agent_model_name if agent_model_name else global_model_name
+
     if _code_generation_agent is None or _LAST_MODEL_NAME != model_name or force_reload:
         return reload_code_generation_agent(message_group)
     return _code_generation_agent
@@ -206,4 +226,5 @@ def get_custom_usage_limits():
     Default pydantic-ai limit is 50, this increases it to the configured value (default 100).
     """
     from code_puppy.config import get_message_limit
+
     return UsageLimits(request_limit=get_message_limit())
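Both `reload_code_generation_agent` and `get_code_generation_agent` now resolve the model the same way: a model pinned on the current agent takes precedence over the globally configured one, and the `hasattr` guard keeps built-in agents without `get_model_name` on the global model. A minimal standalone sketch of that precedence (the helper name `resolve_model_name` is illustrative, not part of the package):

```python
from typing import Optional


def resolve_model_name(agent_config: object, global_model_name: str) -> str:
    """Pinned agent model wins; otherwise fall back to the global model.

    Hypothetical helper mirroring the resolution logic added in agent.py above.
    """
    pinned: Optional[str] = None
    if hasattr(agent_config, "get_model_name"):
        pinned = agent_config.get_model_name()
    return pinned or global_model_name
```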
code_puppy/agents/agent_creator_agent.py
CHANGED
@@ -6,6 +6,7 @@ from typing import Dict, List, Optional
 
 from .base_agent import BaseAgent
 from code_puppy.config import get_user_agents_directory
+from code_puppy.model_factory import ModelFactory
 from code_puppy.tools import get_available_tool_names
 
 
@@ -27,6 +28,16 @@ class AgentCreatorAgent(BaseAgent):
     def get_system_prompt(self) -> str:
         available_tools = get_available_tool_names()
         agents_dir = get_user_agents_directory()
+
+        # Load available models dynamically
+        models_config = ModelFactory.load_config()
+        model_descriptions = []
+        for model_name, model_info in models_config.items():
+            model_type = model_info.get('type', 'Unknown')
+            context_length = model_info.get('context_length', 'Unknown')
+            model_descriptions.append(f"- **{model_name}**: {model_type} model with {context_length} context")
+
+        available_models_str = "\n".join(model_descriptions)
 
         return f"""You are the Agent Creator! 🏗️ Your mission is to help users create awesome JSON agent files through an interactive process.
 
@@ -39,7 +50,7 @@ You specialize in:
 - Creating properly structured JSON agent files
 - Explaining agent capabilities and best practices
 
-## MANDATORY
+## MANDATORY AGENT CREATION PROCESS
 
 **YOU MUST ALWAYS:**
 1. Ask the user what the agent should be able to do
@@ -47,6 +58,8 @@ You specialize in:
 3. List ALL available tools so they can see other options
 4. Ask them to confirm their tool selection
 5. Explain why each selected tool is useful for their agent
+6. Ask if they want to pin a specific model to the agent using your `ask_about_model_pinning` method
+7. Include the model in the final JSON if the user chooses to pin one
 
 ## JSON Agent Schema
 
@@ -63,7 +76,8 @@ Here's the complete schema for JSON agent files:
   "user_prompt": "How can I help?", // OPTIONAL: Custom greeting
   "tools_config": {{ // OPTIONAL: Tool configuration
     "timeout": 60
-  }}
+  }},
+  "model": "model-name" // OPTIONAL: Pin a specific model for this agent
 }}
 ```
 
@@ -77,10 +91,24 @@ Here's the complete schema for JSON agent files:
 - `display_name`: Pretty display name (defaults to title-cased name + 🤖)
 - `user_prompt`: Custom user greeting
 - `tools_config`: Tool configuration object
+- `model`: Pin a specific model for this agent (defaults to global model)
 
 ## ALL AVAILABLE TOOLS:
 {", ".join(f"- **{tool}**" for tool in available_tools)}
 
+## ALL AVAILABLE MODELS:
+{available_models_str}
+
+Users can optionally pin a specific model to their agent to override the global default.
+
+### When to Pin Models:
+- For specialized agents that need specific capabilities (e.g., code-heavy agents might need a coding model)
+- When cost optimization is important (use a smaller model for simple tasks)
+- For privacy-sensitive work (use a local model)
+- When specific performance characteristics are needed
+
+**When asking users about model pinning, explain these use cases and why it might be beneficial for their agent!**
+
 ## Tool Categories & Suggestions:
 
 ### 📁 **File Operations** (for agents working with files):
@@ -122,13 +150,15 @@ Use this to recursively search for a string across files starting from the speci
 
 ### Tool Usage Instructions:
 
-#### `
+#### `ask_about_model_pinning(agent_config)`
+Use this method to ask the user whether they want to pin a specific model to their agent. Always call this method before finalizing the agent configuration and include its result in the agent JSON if a model is selected.
 This is an all-in-one file-modification tool. It supports the following Pydantic Object payload types:
 1. ContentPayload: {{ file_path="example.py", "content": "…", "overwrite": true|false }} → Create or overwrite a file with the provided content.
 2. ReplacementsPayload: {{ file_path="example.py", "replacements": [ {{ "old_str": "…", "new_str": "…" }}, … ] }} → Perform exact text replacements inside an existing file.
 3. DeleteSnippetPayload: {{ file_path="example.py", "delete_snippet": "…" }} → Remove a snippet of text from an existing file.
 
 Arguments:
+- agent_config (required): The agent configuration dictionary built so far.
 - payload (required): One of the Pydantic payload types above.
 
 Example (create):
@@ -271,11 +301,12 @@ This detailed documentation should be copied verbatim into any agent that will b
 3. **🎯 SUGGEST TOOLS** based on their answer with explanations
 4. **📋 SHOW ALL TOOLS** so they know all options
 5. **✅ CONFIRM TOOL SELECTION** and explain choices
-6. **
-7. **
-8.
-9.
-10.
+6. **Ask about model pinning**: "Do you want to pin a specific model to this agent?" with list of options
+7. **Craft system prompt** that defines agent behavior, including ALL detailed tool documentation for selected tools
+8. **Generate complete JSON** with proper structure
+9. **🚨 MANDATORY: ASK FOR USER CONFIRMATION** of the generated JSON
+10. **🤖 AUTOMATICALLY CREATE THE FILE** once user confirms (no additional asking)
+11. **Validate and test** the new agent
 
 ## CRITICAL WORKFLOW RULES:
 
@@ -302,6 +333,14 @@ This detailed documentation should be copied verbatim into any agent that will b
 **For "File organizer":** → Suggest `list_files`, `read_file`, `edit_file`, `delete_file`, `agent_share_your_reasoning`
 **For "Agent orchestrator":** → Suggest `list_agents`, `invoke_agent`, `agent_share_your_reasoning`
 
+## Model Selection Guidance:
+
+**For code-heavy tasks**: → Suggest `Cerebras-Qwen3-Coder-480b`, `grok-code-fast-1`, or `gpt-4.1`
+**For document analysis**: → Suggest `gemini-2.5-flash-preview-05-20` or `claude-4-0-sonnet`
+**For general reasoning**: → Suggest `gpt-5` or `o3`
+**For cost-conscious tasks**: → Suggest `gpt-4.1-mini` or `gpt-4.1-nano`
+**For local/private work**: → Suggest `ollama-llama3.3` or `gpt-4.1-custom`
+
 ## Best Practices
 
 - Use descriptive names with hyphens (e.g., "python-tutor", "code-reviewer")
@@ -320,6 +359,7 @@ This detailed documentation should be copied verbatim into any agent that will b
   "name": "python-tutor",
   "display_name": "Python Tutor 🐍",
   "description": "Teaches Python programming concepts with examples",
+  "model": "gpt-5",
   "system_prompt": [
     "You are a patient Python programming tutor.",
     "You explain concepts clearly with practical examples.",
@@ -327,7 +367,8 @@ This detailed documentation should be copied verbatim into any agent that will b
     "Always encourage learning and provide constructive feedback."
   ],
   "tools": ["read_file", "edit_file", "agent_share_your_reasoning"],
-  "user_prompt": "What Python concept would you like to learn today?"
+  "user_prompt": "What Python concept would you like to learn today?",
+  "model": "Cerebras-Qwen3-Coder-480b" // Optional: Pin to a specific code model
 }}
 ```
 
@@ -344,7 +385,8 @@ This detailed documentation should be copied verbatim into any agent that will b
     "You follow language-specific best practices and conventions."
   ],
   "tools": ["list_files", "read_file", "grep", "agent_share_your_reasoning"],
-  "user_prompt": "Which code would you like me to review?"
+  "user_prompt": "Which code would you like me to review?",
+  "model": "claude-4-0-sonnet" // Optional: Pin to a model good at analysis
 }}
 ```
 
@@ -360,7 +402,8 @@ This detailed documentation should be copied verbatim into any agent that will b
     "You coordinate between multiple agents to get complex work done."
   ],
   "tools": ["list_agents", "invoke_agent", "agent_share_your_reasoning"],
-  "user_prompt": "What can I help you accomplish today?"
+  "user_prompt": "What can I help you accomplish today?",
+  "model": "gpt-5" // Optional: Pin to a reasoning-focused model
 }}
 ```
 
@@ -370,9 +413,11 @@ Be interactive - ask questions, suggest improvements, and guide users through th
 
 ## REMEMBER: COMPLETE THE WORKFLOW!
 - After generating JSON, ALWAYS get confirmation
+- Ask about model pinning using your `ask_about_model_pinning` method
 - Once confirmed, IMMEDIATELY create the file (don't ask again)
 - Use your `edit_file` tool to save the JSON
 - Always explain how to use the new agent with `/agent agent-name`
+- Mention that users can later change or pin the model with `/pin_model agent-name model-name`
 
 ## Tool Documentation Requirements
 
@@ -390,7 +435,14 @@ Your goal is to take users from idea to working agent in one smooth conversation
 
     def get_available_tools(self) -> List[str]:
         """Get all tools needed for agent creation."""
-        return [
+        return [
+            "list_files",
+            "read_file",
+            "edit_file",
+            "agent_share_your_reasoning",
+            "list_agents",
+            "invoke_agent",
+        ]
 
     def validate_agent_json(self, agent_config: Dict) -> List[str]:
         """Validate a JSON agent configuration.
@@ -485,4 +537,4 @@
 
     def get_user_prompt(self) -> Optional[str]:
         """Get the initial user prompt."""
-        return "Hi! I'm the Agent Creator 🏗️ Let's build an awesome agent together!"
+        return "Hi! I'm the Agent Creator 🏗️ Let's build an awesome agent together!"
code_puppy/agents/json_agent.py
CHANGED
@@ -101,6 +101,14 @@ class JSONAgent(BaseAgent):
         """Get tool configuration from JSON config."""
         return self._config.get("tools_config")
 
+    def get_model_name(self) -> Optional[str]:
+        """Get pinned model name from JSON config, if specified.
+
+        Returns:
+            Model name to use for this agent, or None to use global default.
+        """
+        return self._config.get("model")
+
 
 def discover_json_agents() -> Dict[str, str]:
     """Discover JSON agent files in the user's agents directory.
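The new `get_model_name` simply surfaces the optional `model` key from the agent's JSON config. A tiny illustration of the expected behaviour, using plain dicts in place of parsed agent files (not a test shipped with the package):

```python
# Plain dicts standing in for parsed JSON agent files.
pinned_config = {"name": "python-tutor", "model": "gpt-5"}
unpinned_config = {"name": "code-reviewer"}

# Mirrors `return self._config.get("model")` in JSONAgent.get_model_name().
assert pinned_config.get("model") == "gpt-5"
assert unpinned_config.get("model") is None  # falls back to the global model
```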
code_puppy/agents/runtime_manager.py
CHANGED
@@ -27,7 +27,7 @@ from pydantic_ai import Agent
 from pydantic_ai.exceptions import UsageLimitExceeded
 from pydantic_ai.usage import UsageLimits
 
-from code_puppy.messaging.message_queue import emit_info
+from code_puppy.messaging.message_queue import emit_info
 
 
 class RuntimeAgentManager:
@@ -113,7 +113,10 @@ class RuntimeAgentManager:
             return await agent.run(prompt, usage_limits=usage_limits, **kwargs)
         except* UsageLimitExceeded as ule:
             emit_info(f"Usage limit exceeded: {str(ule)}", group_id=group_id)
-            emit_info(
+            emit_info(
+                "The agent has reached its usage limit. You can ask it to continue by saying 'please continue' or similar.",
+                group_id=group_id,
+            )
         except* mcp.shared.exceptions.McpError as mcp_error:
             emit_info(f"MCP server error: {str(mcp_error)}", group_id=group_id)
             emit_info(f"{str(mcp_error)}", group_id=group_id)
@@ -132,7 +135,9 @@ class RuntimeAgentManager:
             if isinstance(exc, ExceptionGroup):
                 for sub_exc in exc.exceptions:
                     collect_non_cancelled_exceptions(sub_exc)
-            elif not isinstance(
+            elif not isinstance(
+                exc, (asyncio.CancelledError, UsageLimitExceeded)
+            ):
                 remaining_exceptions.append(exc)
                 emit_info(f"Unexpected error: {str(exc)}", group_id=group_id)
                 emit_info(f"{str(exc.args)}", group_id=group_id)
@@ -226,7 +231,10 @@ class RuntimeAgentManager:
         except UsageLimitExceeded as ule:
             group_id = str(uuid.uuid4())
             emit_info(f"Usage limit exceeded: {str(ule)}", group_id=group_id)
-            emit_info(
+            emit_info(
+                "The agent has reached its usage limit. You can ask it to continue by saying 'please continue' or similar.",
+                group_id=group_id,
+            )
             # Return None or some default value to indicate the limit was reached
             return None
 
code_puppy/command_line/command_handler.py
CHANGED
@@ -42,6 +42,10 @@ def get_commands_help():
     help_lines.append(
         Text("/model, /m", style="cyan") + Text(" <model> Set active model")
     )
+    help_lines.append(
+        Text("/pin_model", style="cyan")
+        + Text(" <agent> <model> Pin a specific model to an agent")
+    )
     help_lines.append(
         Text("/mcp", style="cyan")
         + Text(" Manage MCP servers (list, start, stop, status, etc.)")
@@ -398,6 +402,85 @@ def handle_command(command: str):
         emit_info(help_text, message_group_id=group_id)
         return True
 
+    if command.startswith("/pin_model"):
+        # Handle agent model pinning
+        from code_puppy.agents.json_agent import discover_json_agents
+        from code_puppy.command_line.model_picker_completion import load_model_names
+        import json
+
+        tokens = command.split()
+
+        if len(tokens) != 3:
+            emit_warning("Usage: /pin_model <agent-name> <model-name>")
+
+            # Show available models and JSON agents
+            available_models = load_model_names()
+            json_agents = discover_json_agents()
+
+            emit_info("Available models:")
+            for model in available_models:
+                emit_info(f" [cyan]{model}[/cyan]")
+
+            if json_agents:
+                emit_info("\nAvailable JSON agents:")
+                for agent_name, agent_path in json_agents.items():
+                    emit_info(f" [cyan]{agent_name}[/cyan] ({agent_path})")
+            return True
+
+        agent_name = tokens[1].lower()
+        model_name = tokens[2]
+
+        # Check if model exists
+        available_models = load_model_names()
+        if model_name not in available_models:
+            emit_error(f"Model '{model_name}' not found")
+            emit_warning(f"Available models: {', '.join(available_models)}")
+            return True
+
+        # Check that we're modifying a JSON agent (not a built-in Python agent)
+        json_agents = discover_json_agents()
+        if agent_name not in json_agents:
+            emit_error(f"JSON agent '{agent_name}' not found")
+
+            # Show available JSON agents
+            if json_agents:
+                emit_info("Available JSON agents:")
+                for name, path in json_agents.items():
+                    emit_info(f" [cyan]{name}[/cyan] ({path})")
+            return True
+
+        agent_file_path = json_agents[agent_name]
+
+        # Load, modify, and save the agent configuration
+        try:
+            with open(agent_file_path, "r", encoding="utf-8") as f:
+                agent_config = json.load(f)
+
+            # Set the model
+            agent_config["model"] = model_name
+
+            # Save the updated configuration
+            with open(agent_file_path, "w", encoding="utf-8") as f:
+                json.dump(agent_config, f, indent=2, ensure_ascii=False)
+
+            emit_success(f"Model '{model_name}' pinned to agent '{agent_name}'")
+
+            # If this is the current agent, reload it to use the new model
+            from code_puppy.agents import get_current_agent_config
+            from code_puppy.agents.runtime_manager import get_runtime_agent_manager
+
+            current_agent = get_current_agent_config()
+            if current_agent.name == agent_name:
+                manager = get_runtime_agent_manager()
+                manager.reload_agent()
+                emit_info(f"Active agent reloaded with pinned model '{model_name}'")
+
+            return True
+
+        except Exception as e:
+            emit_error(f"Failed to pin model to agent '{agent_name}': {e}")
+            return True
+
     if command.startswith("/generate-pr-description"):
         # Parse directory argument (e.g., /generate-pr-description @some/dir)
         tokens = command.split()
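At its core, `/pin_model <agent-name> <model-name>` is a read-modify-write of the agent's JSON file, optionally followed by a reload of the active agent. Stripped of validation and messaging, the file update amounts to roughly this (a sketch, not the handler itself):

```python
import json


def pin_model(agent_file_path: str, model_name: str) -> None:
    """Write a pinned model into a JSON agent file (sketch of the /pin_model core)."""
    with open(agent_file_path, "r", encoding="utf-8") as f:
        agent_config = json.load(f)

    # The "model" key is what JSONAgent.get_model_name() reads back later.
    agent_config["model"] = model_name

    with open(agent_file_path, "w", encoding="utf-8") as f:
        json.dump(agent_config, f, indent=2, ensure_ascii=False)
```

For example, `/pin_model python-tutor gpt-5` would set `"model": "gpt-5"` in the python-tutor agent file.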
code_puppy/command_line/mcp/install_command.py
CHANGED
@@ -158,10 +158,59 @@ class InstallCommand(MCPCommandBase):
             emit_info("Installation cancelled", message_group=group_id)
             return False
 
-        #
+        # Collect environment variables and command line arguments
         env_vars = {}
         cmd_args = {}
 
+        # Get environment variables
+        required_env_vars = selected_server.get_environment_vars()
+        if required_env_vars:
+            emit_info(
+                "\n[yellow]Required Environment Variables:[/yellow]",
+                message_group=group_id,
+            )
+            for var in required_env_vars:
+                # Check if already set in environment
+                import os
+
+                current_value = os.environ.get(var, "")
+                if current_value:
+                    emit_info(
+                        f" {var}: [green]Already set[/green]",
+                        message_group=group_id,
+                    )
+                    env_vars[var] = current_value
+                else:
+                    value = emit_prompt(f" Enter value for {var}: ").strip()
+                    if value:
+                        env_vars[var] = value
+
+        # Get command line arguments
+        required_cmd_args = selected_server.get_command_line_args()
+        if required_cmd_args:
+            emit_info(
+                "\n[yellow]Command Line Arguments:[/yellow]", message_group=group_id
+            )
+            for arg_config in required_cmd_args:
+                name = arg_config.get("name", "")
+                prompt = arg_config.get("prompt", name)
+                default = arg_config.get("default", "")
+                required = arg_config.get("required", True)
+
+                # If required or has default, prompt user
+                if required or default:
+                    arg_prompt = f" {prompt}"
+                    if default:
+                        arg_prompt += f" [{default}]"
+                    if not required:
+                        arg_prompt += " (optional)"
+
+                    value = emit_prompt(f"{arg_prompt}: ").strip()
+                    if value:
+                        cmd_args[name] = value
+                    elif default:
+                        cmd_args[name] = default
+
         # Install the server
         return install_server_from_catalog(
             self.manager, selected_server, server_name, env_vars, cmd_args, group_id
code_puppy/command_line/mcp/wizard_utils.py
CHANGED
@@ -43,9 +43,61 @@ def run_interactive_install_wizard(manager, group_id: str) -> bool:
         if not server_name:
             return False
 
+        # Collect environment variables and command line arguments
+        env_vars = {}
+        cmd_args = {}
+
+        # Get environment variables
+        required_env_vars = selected_server.get_environment_vars()
+        if required_env_vars:
+            emit_info(
+                "\n[yellow]Required Environment Variables:[/yellow]",
+                message_group=group_id,
+            )
+            for var in required_env_vars:
+                # Check if already set in environment
+                import os
+
+                current_value = os.environ.get(var, "")
+                if current_value:
+                    emit_info(
+                        f" {var}: [green]Already set[/green]", message_group=group_id
+                    )
+                    env_vars[var] = current_value
+                else:
+                    value = emit_prompt(f" Enter value for {var}: ").strip()
+                    if value:
+                        env_vars[var] = value
+
+        # Get command line arguments
+        required_cmd_args = selected_server.get_command_line_args()
+        if required_cmd_args:
+            emit_info(
+                "\n[yellow]Command Line Arguments:[/yellow]", message_group=group_id
+            )
+            for arg_config in required_cmd_args:
+                name = arg_config.get("name", "")
+                prompt = arg_config.get("prompt", name)
+                default = arg_config.get("default", "")
+                required = arg_config.get("required", True)
+
+                # If required or has default, prompt user
+                if required or default:
+                    arg_prompt = f" {prompt}"
+                    if default:
+                        arg_prompt += f" [{default}]"
+                    if not required:
+                        arg_prompt += " (optional)"
+
+                    value = emit_prompt(f"{arg_prompt}: ").strip()
+                    if value:
+                        cmd_args[name] = value
+                    elif default:
+                        cmd_args[name] = default
+
         # Configure the server
         return interactive_configure_server(
-            manager, selected_server, server_name, group_id
+            manager, selected_server, server_name, group_id, env_vars, cmd_args
         )
 
     except ImportError:
@@ -131,7 +183,12 @@ def interactive_get_server_name(selected_server, group_id: str) -> Optional[str]
 
 
 def interactive_configure_server(
-    manager,
+    manager,
+    selected_server,
+    server_name: str,
+    group_id: str,
+    env_vars: Dict[str, Any],
+    cmd_args: Dict[str, Any],
 ) -> bool:
     """
     Configure and install the selected server.
@@ -151,15 +208,20 @@ def interactive_configure_server(
         emit_info("Installation cancelled", message_group=group_id)
         return False
 
-    # For now, use defaults - a full implementation would collect env vars, etc.
-    # requirements = selected_server.get_requirements() # TODO: Use for validation
-    env_vars = {}
-    cmd_args = {}
-
     # Show confirmation
     emit_info(f"Installing: {selected_server.display_name}", message_group=group_id)
     emit_info(f"Name: {server_name}", message_group=group_id)
 
+    if env_vars:
+        emit_info("Environment Variables:", message_group=group_id)
+        for var, value in env_vars.items():
+            emit_info(f" {var}: [hidden]{value}[/hidden]", message_group=group_id)
+
+    if cmd_args:
+        emit_info("Command Line Arguments:", message_group=group_id)
+        for arg, value in cmd_args.items():
+            emit_info(f" {arg}: {value}", message_group=group_id)
+
     confirm = emit_prompt("Proceed with installation? [Y/n]: ")
     if confirm.lower().startswith("n"):
         emit_info("Installation cancelled", message_group=group_id)
@@ -196,18 +258,25 @@ def install_server_from_catalog(
     from code_puppy.config import MCP_SERVERS_FILE
     from code_puppy.mcp.managed_server import ServerConfig
 
-    #
-
+    # Set environment variables in the current environment
+    for var, value in env_vars.items():
+        os.environ[var] = value
 
-    #
-
-
-
-
+    # Get server config with command line argument overrides
+    config_dict = selected_server.to_server_config(server_name, **cmd_args)
+
+    # Update the config with actual environment variable values
+    if "env" in config_dict:
+        for env_key, env_value in config_dict["env"].items():
+            # If it's a placeholder like $GITHUB_TOKEN, replace with actual value
+            if env_value.startswith("$"):
+                var_name = env_value[1:]  # Remove the $
+                if var_name in env_vars:
+                    config_dict["env"][env_key] = env_vars[var_name]
 
     # Create ServerConfig
     server_config = ServerConfig(
-        id=
+        id=server_name,
         name=server_name,
         type=selected_server.type,
         enabled=True,
@@ -234,8 +303,10 @@
     data = {"mcp_servers": servers}
 
     # Add new server
-
-
+    # Copy the config dict and add type before saving
+    save_config = config_dict.copy()
+    save_config["type"] = selected_server.type
+    servers[server_name] = save_config
 
     # Save back
     os.makedirs(os.path.dirname(MCP_SERVERS_FILE), exist_ok=True)
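`install_server_from_catalog` now substitutes the collected values for `$VAR`-style placeholders in the catalog entry's `env` block. The substitution step in isolation, sketched over a plain dict (the helper name and sample values are illustrative):

```python
def substitute_env_placeholders(config_env: dict, env_vars: dict) -> dict:
    """Replace $NAME placeholders with collected values, leaving literals untouched."""
    resolved = {}
    for key, value in config_env.items():
        if isinstance(value, str) and value.startswith("$") and value[1:] in env_vars:
            resolved[key] = env_vars[value[1:]]
        else:
            resolved[key] = value
    return resolved


# Example: a catalog entry that references $GITHUB_TOKEN (token value is made up).
print(substitute_env_placeholders(
    {"GITHUB_TOKEN": "$GITHUB_TOKEN"}, {"GITHUB_TOKEN": "ghp_example"}
))  # {'GITHUB_TOKEN': 'ghp_example'}
```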
code_puppy/command_line/prompt_toolkit_completion.py
CHANGED
@@ -139,12 +139,28 @@ def get_prompt_with_active_model(base: str = ">>> "):
     from code_puppy.agents.agent_manager import get_current_agent_config
 
     puppy = get_puppy_name()
-
+    global_model = get_active_model() or "(default)"
 
     # Get current agent information
     current_agent = get_current_agent_config()
     agent_display = current_agent.display_name if current_agent else "code-puppy"
 
+    # Check if current agent has a pinned model
+    agent_model = None
+    if current_agent and hasattr(current_agent, 'get_model_name'):
+        agent_model = current_agent.get_model_name()
+
+    # Determine which model to display
+    if agent_model and agent_model != global_model:
+        # Show both models when they differ
+        model_display = f"[{global_model} → {agent_model}]"
+    elif agent_model:
+        # Show only the agent model when pinned
+        model_display = f"[{agent_model}]"
+    else:
+        # Show only the global model when no agent model is pinned
+        model_display = f"[{global_model}]"
+
     cwd = os.getcwd()
     home = os.path.expanduser("~")
     if cwd.startswith(home):
@@ -157,7 +173,7 @@ def get_prompt_with_active_model(base: str = ">>> "):
         ("class:puppy", f"{puppy}"),
         ("", " "),
         ("class:agent", f"[{agent_display}] "),
-        ("class:model",
+        ("class:model", model_display + " "),
         ("class:cwd", "(" + str(cwd_display) + ") "),
         ("class:arrow", str(base)),
     ]
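The prompt rendering follows a simple display rule: show the pinned model when one is set, and show both when it differs from the global model. A sketch of that rule in isolation (the function name is illustrative, not part of the package):

```python
def format_model_display(global_model: str, agent_model: str | None) -> str:
    """Mirror the prompt logic: show the pinned model, or both when they differ."""
    if agent_model and agent_model != global_model:
        return f"[{global_model} → {agent_model}]"
    if agent_model:
        return f"[{agent_model}]"
    return f"[{global_model}]"


print(format_model_display("gpt-4.1", "gpt-5"))  # [gpt-4.1 → gpt-5]
print(format_model_display("gpt-4.1", None))     # [gpt-4.1]
```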