claude-mpm 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of claude-mpm might be problematic. Click here for more details.
- claude_mpm/__init__.py +17 -0
- claude_mpm/__main__.py +14 -0
- claude_mpm/_version.py +32 -0
- claude_mpm/agents/BASE_AGENT_TEMPLATE.md +88 -0
- claude_mpm/agents/INSTRUCTIONS.md +375 -0
- claude_mpm/agents/__init__.py +118 -0
- claude_mpm/agents/agent_loader.py +621 -0
- claude_mpm/agents/agent_loader_integration.py +229 -0
- claude_mpm/agents/agents_metadata.py +204 -0
- claude_mpm/agents/base_agent.json +27 -0
- claude_mpm/agents/base_agent_loader.py +519 -0
- claude_mpm/agents/schema/agent_schema.json +160 -0
- claude_mpm/agents/system_agent_config.py +587 -0
- claude_mpm/agents/templates/__init__.py +101 -0
- claude_mpm/agents/templates/data_engineer_agent.json +46 -0
- claude_mpm/agents/templates/documentation_agent.json +45 -0
- claude_mpm/agents/templates/engineer_agent.json +49 -0
- claude_mpm/agents/templates/ops_agent.json +46 -0
- claude_mpm/agents/templates/qa_agent.json +45 -0
- claude_mpm/agents/templates/research_agent.json +49 -0
- claude_mpm/agents/templates/security_agent.json +46 -0
- claude_mpm/agents/templates/update-optimized-specialized-agents.json +374 -0
- claude_mpm/agents/templates/version_control_agent.json +46 -0
- claude_mpm/agents/test_fix_deployment/.claude-pm/config/project.json +6 -0
- claude_mpm/cli.py +655 -0
- claude_mpm/cli_main.py +13 -0
- claude_mpm/cli_module/__init__.py +15 -0
- claude_mpm/cli_module/args.py +222 -0
- claude_mpm/cli_module/commands.py +203 -0
- claude_mpm/cli_module/migration_example.py +183 -0
- claude_mpm/cli_module/refactoring_guide.md +253 -0
- claude_mpm/cli_old/__init__.py +1 -0
- claude_mpm/cli_old/ticket_cli.py +102 -0
- claude_mpm/config/__init__.py +5 -0
- claude_mpm/config/hook_config.py +42 -0
- claude_mpm/constants.py +150 -0
- claude_mpm/core/__init__.py +45 -0
- claude_mpm/core/agent_name_normalizer.py +248 -0
- claude_mpm/core/agent_registry.py +627 -0
- claude_mpm/core/agent_registry.py.bak +312 -0
- claude_mpm/core/agent_session_manager.py +273 -0
- claude_mpm/core/base_service.py +747 -0
- claude_mpm/core/base_service.py.bak +406 -0
- claude_mpm/core/config.py +334 -0
- claude_mpm/core/config_aliases.py +292 -0
- claude_mpm/core/container.py +347 -0
- claude_mpm/core/factories.py +281 -0
- claude_mpm/core/framework_loader.py +472 -0
- claude_mpm/core/injectable_service.py +206 -0
- claude_mpm/core/interfaces.py +539 -0
- claude_mpm/core/logger.py +468 -0
- claude_mpm/core/minimal_framework_loader.py +107 -0
- claude_mpm/core/mixins.py +150 -0
- claude_mpm/core/service_registry.py +299 -0
- claude_mpm/core/session_manager.py +190 -0
- claude_mpm/core/simple_runner.py +511 -0
- claude_mpm/core/tool_access_control.py +173 -0
- claude_mpm/hooks/README.md +243 -0
- claude_mpm/hooks/__init__.py +5 -0
- claude_mpm/hooks/base_hook.py +154 -0
- claude_mpm/hooks/builtin/__init__.py +1 -0
- claude_mpm/hooks/builtin/logging_hook_example.py +165 -0
- claude_mpm/hooks/builtin/post_delegation_hook_example.py +124 -0
- claude_mpm/hooks/builtin/pre_delegation_hook_example.py +125 -0
- claude_mpm/hooks/builtin/submit_hook_example.py +100 -0
- claude_mpm/hooks/builtin/ticket_extraction_hook_example.py +237 -0
- claude_mpm/hooks/builtin/todo_agent_prefix_hook.py +239 -0
- claude_mpm/hooks/builtin/workflow_start_hook.py +181 -0
- claude_mpm/hooks/hook_client.py +264 -0
- claude_mpm/hooks/hook_runner.py +370 -0
- claude_mpm/hooks/json_rpc_executor.py +259 -0
- claude_mpm/hooks/json_rpc_hook_client.py +319 -0
- claude_mpm/hooks/tool_call_interceptor.py +204 -0
- claude_mpm/init.py +246 -0
- claude_mpm/orchestration/SUBPROCESS_DESIGN.md +66 -0
- claude_mpm/orchestration/__init__.py +6 -0
- claude_mpm/orchestration/archive/direct_orchestrator.py +195 -0
- claude_mpm/orchestration/archive/factory.py +215 -0
- claude_mpm/orchestration/archive/hook_enabled_orchestrator.py +188 -0
- claude_mpm/orchestration/archive/hook_integration_example.py +178 -0
- claude_mpm/orchestration/archive/interactive_subprocess_orchestrator.py +826 -0
- claude_mpm/orchestration/archive/orchestrator.py +501 -0
- claude_mpm/orchestration/archive/pexpect_orchestrator.py +252 -0
- claude_mpm/orchestration/archive/pty_orchestrator.py +270 -0
- claude_mpm/orchestration/archive/simple_orchestrator.py +82 -0
- claude_mpm/orchestration/archive/subprocess_orchestrator.py +801 -0
- claude_mpm/orchestration/archive/system_prompt_orchestrator.py +278 -0
- claude_mpm/orchestration/archive/wrapper_orchestrator.py +187 -0
- claude_mpm/scripts/__init__.py +1 -0
- claude_mpm/scripts/ticket.py +269 -0
- claude_mpm/services/__init__.py +10 -0
- claude_mpm/services/agent_deployment.py +955 -0
- claude_mpm/services/agent_lifecycle_manager.py +948 -0
- claude_mpm/services/agent_management_service.py +596 -0
- claude_mpm/services/agent_modification_tracker.py +841 -0
- claude_mpm/services/agent_profile_loader.py +606 -0
- claude_mpm/services/agent_registry.py +677 -0
- claude_mpm/services/base_agent_manager.py +380 -0
- claude_mpm/services/framework_agent_loader.py +337 -0
- claude_mpm/services/framework_claude_md_generator/README.md +92 -0
- claude_mpm/services/framework_claude_md_generator/__init__.py +206 -0
- claude_mpm/services/framework_claude_md_generator/content_assembler.py +151 -0
- claude_mpm/services/framework_claude_md_generator/content_validator.py +126 -0
- claude_mpm/services/framework_claude_md_generator/deployment_manager.py +137 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/__init__.py +106 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/agents.py +582 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/claude_pm_init.py +97 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/core_responsibilities.py +27 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/delegation_constraints.py +23 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/environment_config.py +23 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/footer.py +20 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/header.py +26 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/orchestration_principles.py +30 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/role_designation.py +37 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/subprocess_validation.py +111 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/todo_task_tools.py +89 -0
- claude_mpm/services/framework_claude_md_generator/section_generators/troubleshooting.py +39 -0
- claude_mpm/services/framework_claude_md_generator/section_manager.py +106 -0
- claude_mpm/services/framework_claude_md_generator/version_manager.py +121 -0
- claude_mpm/services/framework_claude_md_generator.py +621 -0
- claude_mpm/services/hook_service.py +388 -0
- claude_mpm/services/hook_service_manager.py +223 -0
- claude_mpm/services/json_rpc_hook_manager.py +92 -0
- claude_mpm/services/parent_directory_manager/README.md +83 -0
- claude_mpm/services/parent_directory_manager/__init__.py +577 -0
- claude_mpm/services/parent_directory_manager/backup_manager.py +258 -0
- claude_mpm/services/parent_directory_manager/config_manager.py +210 -0
- claude_mpm/services/parent_directory_manager/deduplication_manager.py +279 -0
- claude_mpm/services/parent_directory_manager/framework_protector.py +143 -0
- claude_mpm/services/parent_directory_manager/operations.py +186 -0
- claude_mpm/services/parent_directory_manager/state_manager.py +624 -0
- claude_mpm/services/parent_directory_manager/template_deployer.py +579 -0
- claude_mpm/services/parent_directory_manager/validation_manager.py +378 -0
- claude_mpm/services/parent_directory_manager/version_control_helper.py +339 -0
- claude_mpm/services/parent_directory_manager/version_manager.py +222 -0
- claude_mpm/services/shared_prompt_cache.py +819 -0
- claude_mpm/services/ticket_manager.py +213 -0
- claude_mpm/services/ticket_manager_di.py +318 -0
- claude_mpm/services/ticketing_service_original.py +508 -0
- claude_mpm/services/version_control/VERSION +1 -0
- claude_mpm/services/version_control/__init__.py +70 -0
- claude_mpm/services/version_control/branch_strategy.py +670 -0
- claude_mpm/services/version_control/conflict_resolution.py +744 -0
- claude_mpm/services/version_control/git_operations.py +784 -0
- claude_mpm/services/version_control/semantic_versioning.py +703 -0
- claude_mpm/ui/__init__.py +1 -0
- claude_mpm/ui/rich_terminal_ui.py +295 -0
- claude_mpm/ui/terminal_ui.py +328 -0
- claude_mpm/utils/__init__.py +16 -0
- claude_mpm/utils/config_manager.py +468 -0
- claude_mpm/utils/import_migration_example.py +80 -0
- claude_mpm/utils/imports.py +182 -0
- claude_mpm/utils/path_operations.py +357 -0
- claude_mpm/utils/paths.py +289 -0
- claude_mpm-0.3.0.dist-info/METADATA +290 -0
- claude_mpm-0.3.0.dist-info/RECORD +159 -0
- claude_mpm-0.3.0.dist-info/WHEEL +5 -0
- claude_mpm-0.3.0.dist-info/entry_points.txt +4 -0
- claude_mpm-0.3.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,955 @@
|
|
|
1
|
+
"""Agent deployment service for Claude Code native subagents."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import shutil
|
|
5
|
+
import logging
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Optional, List, Dict, Any
|
|
8
|
+
|
|
9
|
+
from claude_mpm.core.logger import get_logger
|
|
10
|
+
from claude_mpm.constants import EnvironmentVars, Paths, AgentMetadata
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class AgentDeploymentService:
    """Builds and deploys Claude Code native agents from JSON templates."""

    def __init__(self, templates_dir: Optional[Path] = None, base_agent_path: Optional[Path] = None):
        """
        Initialize agent deployment service.

        Args:
            templates_dir: Directory containing agent template files
            base_agent_path: Path to base_agent.md file
        """
        self.logger = get_logger(self.__class__.__name__)

        # Resolve defaults relative to the installed package root
        # (src/claude_mpm/), mirroring the repo layout.
        package_root = Path(__file__).parent.parent
        self.templates_dir = (
            Path(templates_dir)
            if templates_dir
            else package_root / "agents" / "templates"
        )
        self.base_agent_path = (
            Path(base_agent_path)
            if base_agent_path
            else package_root / "agents" / "base_agent.json"
        )

        self.logger.info(f"Templates directory: {self.templates_dir}")
        self.logger.info(f"Base agent path: {self.base_agent_path}")
|
|
43
|
+
|
|
44
|
+
    def deploy_agents(self, target_dir: Optional[Path] = None, force_rebuild: bool = False) -> Dict[str, Any]:
        """
        Build and deploy agents by combining base_agent.md with templates.
        Also deploys system instructions for PM framework.

        For each ``*_agent.json`` template found in ``self.templates_dir``,
        builds a markdown agent file and writes it to the target directory,
        skipping agents that are already up to date (unless force_rebuild).

        Args:
            target_dir: Target directory for agents (default: .claude/agents/)
            force_rebuild: Force rebuild even if agents exist

        Returns:
            Dictionary with deployment results: keys "target_dir", "deployed",
            "updated", "skipped", "errors", and "total" (template count).
            Per-template failures are collected in "errors" rather than raised.
        """
        if not target_dir:
            target_dir = Path(Paths.CLAUDE_AGENTS_DIR.value).expanduser()

        target_dir = Path(target_dir)
        # Accumulates per-agent outcomes; "total" counts templates discovered,
        # not files written.
        results = {
            "target_dir": str(target_dir),
            "deployed": [],
            "errors": [],
            "skipped": [],
            "updated": [],
            "total": 0
        }

        try:
            # Create target directory if needed
            target_dir.mkdir(parents=True, exist_ok=True)
            self.logger.info(f"Building and deploying agents to: {target_dir}")

            # Note: System instructions are now loaded directly by SimpleClaudeRunner

            # Check if templates directory exists
            if not self.templates_dir.exists():
                error_msg = f"Templates directory not found: {self.templates_dir}"
                self.logger.error(error_msg)
                results["errors"].append(error_msg)
                return results

            # Load base agent content. A missing/unparseable base agent is
            # tolerated: agents are then built from their templates alone.
            base_agent_data = {}
            base_agent_version = 0
            if self.base_agent_path.exists():
                try:
                    import json
                    base_agent_data = json.loads(self.base_agent_path.read_text())
                    base_agent_version = base_agent_data.get('version', 0)
                    self.logger.info(f"Loaded base agent template (version {base_agent_version})")
                except Exception as e:
                    self.logger.warning(f"Could not load base agent: {e}")

            # Get all template files
            template_files = list(self.templates_dir.glob("*_agent.json"))
            results["total"] = len(template_files)

            for template_file in template_files:
                try:
                    # "research_agent.json" -> agent name "research" -> "research.md"
                    agent_name = template_file.stem.replace("_agent", "")
                    target_file = target_dir / f"{agent_name}.md"

                    # Check if agent needs update (version comparison against
                    # the deployed file); force_rebuild bypasses the check.
                    needs_update = force_rebuild
                    if not needs_update and target_file.exists():
                        needs_update, reason = self._check_agent_needs_update(
                            target_file, template_file, base_agent_version
                        )
                        if needs_update:
                            self.logger.info(f"Agent {agent_name} needs update: {reason}")

                    # Skip if exists and doesn't need update
                    if target_file.exists() and not needs_update:
                        results["skipped"].append(agent_name)
                        self.logger.debug(f"Skipped up-to-date agent: {agent_name}")
                        continue

                    # Build the agent file
                    agent_md = self._build_agent_markdown(agent_name, template_file, base_agent_data)

                    # Write the agent file. Record whether this overwrote an
                    # existing deployment so results distinguish new vs updated.
                    is_update = target_file.exists()
                    target_file.write_text(agent_md)

                    if is_update:
                        results["updated"].append({
                            "name": agent_name,
                            "template": str(template_file),
                            "target": str(target_file)
                        })
                        self.logger.debug(f"Updated agent: {agent_name}")
                    else:
                        results["deployed"].append({
                            "name": agent_name,
                            "template": str(template_file),
                            "target": str(target_file)
                        })
                        self.logger.debug(f"Built and deployed agent: {agent_name}")

                except Exception as e:
                    # One bad template must not abort the rest of the deployment.
                    error_msg = f"Failed to build {template_file.name}: {e}"
                    self.logger.error(error_msg)
                    results["errors"].append(error_msg)

            self.logger.info(
                f"Deployed {len(results['deployed'])} agents, "
                f"updated {len(results['updated'])}, "
                f"skipped {len(results['skipped'])}, "
                f"errors: {len(results['errors'])}"
            )

        except Exception as e:
            # Catch-all so callers always receive a results dict, never an exception.
            error_msg = f"Agent deployment failed: {e}"
            self.logger.error(error_msg)
            results["errors"].append(error_msg)

        return results
|
|
159
|
+
|
|
160
|
+
def _extract_version(self, content: str, version_marker: str) -> int:
|
|
161
|
+
"""
|
|
162
|
+
Extract version number from content.
|
|
163
|
+
|
|
164
|
+
Args:
|
|
165
|
+
content: File content
|
|
166
|
+
version_marker: Version marker to look for (e.g., "AGENT_VERSION:" or "BASE_AGENT_VERSION:")
|
|
167
|
+
|
|
168
|
+
Returns:
|
|
169
|
+
Version number or 0 if not found
|
|
170
|
+
"""
|
|
171
|
+
import re
|
|
172
|
+
pattern = rf"<!-- {version_marker} (\d+) -->"
|
|
173
|
+
match = re.search(pattern, content)
|
|
174
|
+
if match:
|
|
175
|
+
return int(match.group(1))
|
|
176
|
+
return 0
|
|
177
|
+
|
|
178
|
+
def _build_agent_markdown(self, agent_name: str, template_path: Path, base_agent_data: dict) -> str:
|
|
179
|
+
"""
|
|
180
|
+
Build a complete agent markdown file with YAML frontmatter.
|
|
181
|
+
|
|
182
|
+
Args:
|
|
183
|
+
agent_name: Name of the agent
|
|
184
|
+
template_path: Path to the agent template JSON file
|
|
185
|
+
base_agent_data: Base agent data from JSON
|
|
186
|
+
|
|
187
|
+
Returns:
|
|
188
|
+
Complete agent markdown content with YAML frontmatter
|
|
189
|
+
"""
|
|
190
|
+
import json
|
|
191
|
+
from datetime import datetime
|
|
192
|
+
|
|
193
|
+
# Read template JSON
|
|
194
|
+
template_data = json.loads(template_path.read_text())
|
|
195
|
+
|
|
196
|
+
# Extract basic info
|
|
197
|
+
agent_version = template_data.get('version', 0)
|
|
198
|
+
base_version = base_agent_data.get('version', 0)
|
|
199
|
+
version_string = f"{base_version:04d}-{agent_version:04d}"
|
|
200
|
+
|
|
201
|
+
# Build YAML frontmatter
|
|
202
|
+
description = (
|
|
203
|
+
template_data.get('configuration_fields', {}).get('description') or
|
|
204
|
+
template_data.get('description') or
|
|
205
|
+
'Agent for specialized tasks'
|
|
206
|
+
)
|
|
207
|
+
|
|
208
|
+
tags = (
|
|
209
|
+
template_data.get('configuration_fields', {}).get('tags') or
|
|
210
|
+
template_data.get('tags') or
|
|
211
|
+
[agent_name, 'mpm-framework']
|
|
212
|
+
)
|
|
213
|
+
|
|
214
|
+
frontmatter = f"""---
|
|
215
|
+
name: {agent_name}
|
|
216
|
+
description: "{description}"
|
|
217
|
+
version: "{version_string}"
|
|
218
|
+
author: "{template_data.get('author', 'claude-mpm@anthropic.com')}"
|
|
219
|
+
created: "{datetime.now().isoformat()}Z"
|
|
220
|
+
updated: "{datetime.now().isoformat()}Z"
|
|
221
|
+
tags: {tags}
|
|
222
|
+
---
|
|
223
|
+
|
|
224
|
+
"""
|
|
225
|
+
|
|
226
|
+
# Get the main content (instructions)
|
|
227
|
+
# Check multiple possible locations for instructions
|
|
228
|
+
content = (
|
|
229
|
+
template_data.get('instructions') or
|
|
230
|
+
template_data.get('narrative_fields', {}).get('instructions') or
|
|
231
|
+
template_data.get('content') or
|
|
232
|
+
f"You are the {agent_name} agent. Perform tasks related to {template_data.get('description', 'your specialization')}."
|
|
233
|
+
)
|
|
234
|
+
|
|
235
|
+
return frontmatter + content
|
|
236
|
+
|
|
237
|
+
def _build_agent_yaml(self, agent_name: str, template_path: Path, base_agent_data: dict) -> str:
|
|
238
|
+
"""
|
|
239
|
+
Build a complete agent YAML file by combining base agent and template.
|
|
240
|
+
|
|
241
|
+
Args:
|
|
242
|
+
agent_name: Name of the agent
|
|
243
|
+
template_path: Path to the agent template JSON file
|
|
244
|
+
base_agent_data: Base agent data from JSON
|
|
245
|
+
|
|
246
|
+
Returns:
|
|
247
|
+
Complete agent YAML content
|
|
248
|
+
"""
|
|
249
|
+
import json
|
|
250
|
+
from datetime import datetime
|
|
251
|
+
|
|
252
|
+
# Read template JSON
|
|
253
|
+
template_data = json.loads(template_path.read_text())
|
|
254
|
+
|
|
255
|
+
# Extract versions
|
|
256
|
+
agent_version = template_data.get('version', 0)
|
|
257
|
+
base_version = base_agent_data.get('version', 0)
|
|
258
|
+
|
|
259
|
+
# Create version string in XXXX-YYYY format
|
|
260
|
+
version_string = f"{base_version:04d}-{agent_version:04d}"
|
|
261
|
+
|
|
262
|
+
# Merge narrative fields (base + agent specific)
|
|
263
|
+
narrative_fields = self._merge_narrative_fields(base_agent_data, template_data)
|
|
264
|
+
|
|
265
|
+
# Merge configuration fields (agent overrides base)
|
|
266
|
+
config_fields = self._merge_configuration_fields(base_agent_data, template_data)
|
|
267
|
+
|
|
268
|
+
# Build YAML frontmatter following best practices
|
|
269
|
+
yaml_content = f"""---
|
|
270
|
+
# Core Identity
|
|
271
|
+
name: "{agent_name}"
|
|
272
|
+
description: "{config_fields.get('description', '')}"
|
|
273
|
+
version: "{version_string}"
|
|
274
|
+
author: "claude-mpm@anthropic.com"
|
|
275
|
+
created: "{datetime.now().isoformat()}Z"
|
|
276
|
+
updated: "{datetime.now().isoformat()}Z"
|
|
277
|
+
|
|
278
|
+
# Categorization
|
|
279
|
+
tags: {config_fields.get('tags', [])}
|
|
280
|
+
team: "{config_fields.get('team', 'mpm-framework')}"
|
|
281
|
+
project: "{config_fields.get('project', 'claude-mpm')}"
|
|
282
|
+
priority: "{config_fields.get('priority', 'high')}"
|
|
283
|
+
|
|
284
|
+
# Behavioral Configuration
|
|
285
|
+
tools: {config_fields.get('tools', [])}
|
|
286
|
+
timeout: {config_fields.get('timeout', 600)}
|
|
287
|
+
max_tokens: {config_fields.get('max_tokens', 8192)}
|
|
288
|
+
model: "{config_fields.get('model', 'claude-3-5-sonnet-20241022')}"
|
|
289
|
+
temperature: {config_fields.get('temperature', 0.3)}
|
|
290
|
+
|
|
291
|
+
# Access Control
|
|
292
|
+
file_access: "{config_fields.get('file_access', 'project')}"
|
|
293
|
+
network_access: {str(config_fields.get('network_access', True)).lower()}
|
|
294
|
+
dangerous_tools: {str(config_fields.get('dangerous_tools', False)).lower()}
|
|
295
|
+
review_required: {str(config_fields.get('review_required', False)).lower()}
|
|
296
|
+
|
|
297
|
+
# Resource Management
|
|
298
|
+
memory_limit: {config_fields.get('memory_limit', 2048)}
|
|
299
|
+
cpu_limit: {config_fields.get('cpu_limit', 50)}
|
|
300
|
+
execution_timeout: {config_fields.get('timeout', 600)}
|
|
301
|
+
|
|
302
|
+
# When/Why/What sections extracted from template
|
|
303
|
+
when_to_use:
|
|
304
|
+
{self._format_yaml_list(narrative_fields.get('when_to_use', []), 2)}
|
|
305
|
+
|
|
306
|
+
rationale:
|
|
307
|
+
specialized_knowledge:
|
|
308
|
+
{self._format_yaml_list(narrative_fields.get('specialized_knowledge', []), 4)}
|
|
309
|
+
unique_capabilities:
|
|
310
|
+
{self._format_yaml_list(narrative_fields.get('unique_capabilities', []), 4)}
|
|
311
|
+
|
|
312
|
+
capabilities:
|
|
313
|
+
primary_role: "{config_fields.get('primary_role', '')}"
|
|
314
|
+
specializations: {config_fields.get('specializations', [])}
|
|
315
|
+
authority: "{config_fields.get('authority', '')}"
|
|
316
|
+
|
|
317
|
+
# Agent Metadata
|
|
318
|
+
metadata:
|
|
319
|
+
source: "claude-mpm"
|
|
320
|
+
template_version: {agent_version}
|
|
321
|
+
base_version: {base_version}
|
|
322
|
+
deployment_type: "system"
|
|
323
|
+
|
|
324
|
+
...
|
|
325
|
+
---
|
|
326
|
+
|
|
327
|
+
# System Prompt
|
|
328
|
+
|
|
329
|
+
"""
|
|
330
|
+
|
|
331
|
+
# Add combined instructions
|
|
332
|
+
combined_instructions = narrative_fields.get('instructions', '')
|
|
333
|
+
if combined_instructions:
|
|
334
|
+
yaml_content += combined_instructions
|
|
335
|
+
|
|
336
|
+
return yaml_content
|
|
337
|
+
|
|
338
|
+
def _merge_narrative_fields(self, base_data: dict, template_data: dict) -> dict:
|
|
339
|
+
"""
|
|
340
|
+
Merge narrative fields from base and template, combining arrays.
|
|
341
|
+
|
|
342
|
+
Args:
|
|
343
|
+
base_data: Base agent data
|
|
344
|
+
template_data: Agent template data
|
|
345
|
+
|
|
346
|
+
Returns:
|
|
347
|
+
Merged narrative fields
|
|
348
|
+
"""
|
|
349
|
+
base_narrative = base_data.get('narrative_fields', {})
|
|
350
|
+
template_narrative = template_data.get('narrative_fields', {})
|
|
351
|
+
|
|
352
|
+
merged = {}
|
|
353
|
+
|
|
354
|
+
# For narrative fields, combine base + template
|
|
355
|
+
for field in ['when_to_use', 'specialized_knowledge', 'unique_capabilities']:
|
|
356
|
+
base_items = base_narrative.get(field, [])
|
|
357
|
+
template_items = template_narrative.get(field, [])
|
|
358
|
+
merged[field] = base_items + template_items
|
|
359
|
+
|
|
360
|
+
# For instructions, combine with separator
|
|
361
|
+
base_instructions = base_narrative.get('instructions', '')
|
|
362
|
+
template_instructions = template_narrative.get('instructions', '')
|
|
363
|
+
|
|
364
|
+
if base_instructions and template_instructions:
|
|
365
|
+
merged['instructions'] = base_instructions + "\n\n---\n\n" + template_instructions
|
|
366
|
+
elif template_instructions:
|
|
367
|
+
merged['instructions'] = template_instructions
|
|
368
|
+
elif base_instructions:
|
|
369
|
+
merged['instructions'] = base_instructions
|
|
370
|
+
else:
|
|
371
|
+
merged['instructions'] = ''
|
|
372
|
+
|
|
373
|
+
return merged
|
|
374
|
+
|
|
375
|
+
def _merge_configuration_fields(self, base_data: dict, template_data: dict) -> dict:
|
|
376
|
+
"""
|
|
377
|
+
Merge configuration fields, with template overriding base.
|
|
378
|
+
|
|
379
|
+
Args:
|
|
380
|
+
base_data: Base agent data
|
|
381
|
+
template_data: Agent template data
|
|
382
|
+
|
|
383
|
+
Returns:
|
|
384
|
+
Merged configuration fields
|
|
385
|
+
"""
|
|
386
|
+
base_config = base_data.get('configuration_fields', {})
|
|
387
|
+
template_config = template_data.get('configuration_fields', {})
|
|
388
|
+
|
|
389
|
+
# Start with base configuration
|
|
390
|
+
merged = base_config.copy()
|
|
391
|
+
|
|
392
|
+
# Override with template-specific configuration
|
|
393
|
+
merged.update(template_config)
|
|
394
|
+
|
|
395
|
+
return merged
|
|
396
|
+
|
|
397
|
+
    def set_claude_environment(self, config_dir: Optional[Path] = None) -> Dict[str, str]:
        """
        Set Claude environment variables for agent discovery.

        NOTE: mutates ``os.environ`` for the current process (and anything it
        spawns afterwards); this side effect is the point of the method.

        Args:
            config_dir: Claude configuration directory (default: .claude/)

        Returns:
            Dictionary of environment variables set
        """
        if not config_dir:
            # Default to a project-local .claude/ directory under the cwd.
            config_dir = Path.cwd() / Paths.CLAUDE_CONFIG_DIR.value

        env_vars = {}

        # Set Claude configuration directory (absolute path so child processes
        # resolve it regardless of their own working directory)
        env_vars[EnvironmentVars.CLAUDE_CONFIG_DIR.value] = str(config_dir.absolute())

        # Set parallel agent limits
        env_vars[EnvironmentVars.CLAUDE_MAX_PARALLEL_SUBAGENTS.value] = EnvironmentVars.DEFAULT_MAX_AGENTS.value

        # Set timeout for agent execution
        env_vars[EnvironmentVars.CLAUDE_TIMEOUT.value] = EnvironmentVars.DEFAULT_TIMEOUT.value

        # Apply environment variables to this process
        for key, value in env_vars.items():
            os.environ[key] = value
            self.logger.debug(f"Set environment: {key}={value}")

        return env_vars
|
|
427
|
+
|
|
428
|
+
def verify_deployment(self, config_dir: Optional[Path] = None) -> Dict[str, Any]:
|
|
429
|
+
"""
|
|
430
|
+
Verify agent deployment and Claude configuration.
|
|
431
|
+
|
|
432
|
+
Args:
|
|
433
|
+
config_dir: Claude configuration directory (default: .claude/)
|
|
434
|
+
|
|
435
|
+
Returns:
|
|
436
|
+
Verification results
|
|
437
|
+
"""
|
|
438
|
+
if not config_dir:
|
|
439
|
+
config_dir = Path.cwd() / ".claude"
|
|
440
|
+
|
|
441
|
+
results = {
|
|
442
|
+
"config_dir": str(config_dir),
|
|
443
|
+
"agents_found": [],
|
|
444
|
+
"environment": {},
|
|
445
|
+
"warnings": []
|
|
446
|
+
}
|
|
447
|
+
|
|
448
|
+
# Check configuration directory
|
|
449
|
+
if not config_dir.exists():
|
|
450
|
+
results["warnings"].append(f"Configuration directory not found: {config_dir}")
|
|
451
|
+
return results
|
|
452
|
+
|
|
453
|
+
# Check agents directory
|
|
454
|
+
agents_dir = config_dir / "agents"
|
|
455
|
+
if not agents_dir.exists():
|
|
456
|
+
results["warnings"].append(f"Agents directory not found: {agents_dir}")
|
|
457
|
+
return results
|
|
458
|
+
|
|
459
|
+
# List deployed agents
|
|
460
|
+
agent_files = list(agents_dir.glob("*.md"))
|
|
461
|
+
for agent_file in agent_files:
|
|
462
|
+
try:
|
|
463
|
+
# Read first few lines to get agent name from YAML
|
|
464
|
+
with open(agent_file, 'r') as f:
|
|
465
|
+
lines = f.readlines()[:10]
|
|
466
|
+
|
|
467
|
+
agent_info = {
|
|
468
|
+
"file": agent_file.name,
|
|
469
|
+
"path": str(agent_file)
|
|
470
|
+
}
|
|
471
|
+
|
|
472
|
+
# Extract name from YAML frontmatter
|
|
473
|
+
for line in lines:
|
|
474
|
+
if line.startswith("name:"):
|
|
475
|
+
agent_info["name"] = line.split(":", 1)[1].strip().strip('"\'')
|
|
476
|
+
break
|
|
477
|
+
|
|
478
|
+
results["agents_found"].append(agent_info)
|
|
479
|
+
|
|
480
|
+
except Exception as e:
|
|
481
|
+
results["warnings"].append(f"Failed to read {agent_file.name}: {e}")
|
|
482
|
+
|
|
483
|
+
# Check environment variables
|
|
484
|
+
env_vars = ["CLAUDE_CONFIG_DIR", "CLAUDE_MAX_PARALLEL_SUBAGENTS", "CLAUDE_TIMEOUT"]
|
|
485
|
+
for var in env_vars:
|
|
486
|
+
value = os.environ.get(var)
|
|
487
|
+
if value:
|
|
488
|
+
results["environment"][var] = value
|
|
489
|
+
else:
|
|
490
|
+
results["warnings"].append(f"Environment variable not set: {var}")
|
|
491
|
+
|
|
492
|
+
return results
|
|
493
|
+
|
|
494
|
+
def list_available_agents(self) -> List[Dict[str, Any]]:
|
|
495
|
+
"""
|
|
496
|
+
List available agent templates.
|
|
497
|
+
|
|
498
|
+
Returns:
|
|
499
|
+
List of agent information dictionaries
|
|
500
|
+
"""
|
|
501
|
+
agents = []
|
|
502
|
+
|
|
503
|
+
if not self.templates_dir.exists():
|
|
504
|
+
self.logger.warning(f"Templates directory not found: {self.templates_dir}")
|
|
505
|
+
return agents
|
|
506
|
+
|
|
507
|
+
template_files = sorted(self.templates_dir.glob("*_agent.json"))
|
|
508
|
+
|
|
509
|
+
for template_file in template_files:
|
|
510
|
+
try:
|
|
511
|
+
agent_name = template_file.stem.replace("_agent", "")
|
|
512
|
+
agent_info = {
|
|
513
|
+
"name": agent_name,
|
|
514
|
+
"file": template_file.name,
|
|
515
|
+
"path": str(template_file),
|
|
516
|
+
"size": template_file.stat().st_size,
|
|
517
|
+
"description": f"{agent_name.title()} agent for specialized tasks"
|
|
518
|
+
}
|
|
519
|
+
|
|
520
|
+
# Try to extract metadata from template JSON
|
|
521
|
+
try:
|
|
522
|
+
import json
|
|
523
|
+
template_data = json.loads(template_file.read_text())
|
|
524
|
+
config_fields = template_data.get('configuration_fields', {})
|
|
525
|
+
|
|
526
|
+
agent_info["role"] = config_fields.get('primary_role', '')
|
|
527
|
+
agent_info["description"] = config_fields.get('description', agent_info["description"])
|
|
528
|
+
agent_info["version"] = template_data.get('version', 0)
|
|
529
|
+
|
|
530
|
+
except Exception:
|
|
531
|
+
pass # Use defaults if can't parse
|
|
532
|
+
|
|
533
|
+
agents.append(agent_info)
|
|
534
|
+
|
|
535
|
+
except Exception as e:
|
|
536
|
+
self.logger.error(f"Failed to read template {template_file.name}: {e}")
|
|
537
|
+
|
|
538
|
+
return agents
|
|
539
|
+
|
|
540
|
+
def _check_agent_needs_update(self, deployed_file: Path, template_file: Path, current_base_version: int) -> tuple:
|
|
541
|
+
"""
|
|
542
|
+
Check if a deployed agent needs to be updated.
|
|
543
|
+
|
|
544
|
+
Args:
|
|
545
|
+
deployed_file: Path to the deployed agent file
|
|
546
|
+
template_file: Path to the template file
|
|
547
|
+
current_base_version: Current base agent version
|
|
548
|
+
|
|
549
|
+
Returns:
|
|
550
|
+
Tuple of (needs_update, reason)
|
|
551
|
+
"""
|
|
552
|
+
try:
|
|
553
|
+
# Read deployed agent content
|
|
554
|
+
deployed_content = deployed_file.read_text()
|
|
555
|
+
|
|
556
|
+
# Check if it's a system agent (authored by claude-mpm)
|
|
557
|
+
if "author: claude-mpm" not in deployed_content and "author: 'claude-mpm'" not in deployed_content:
|
|
558
|
+
return (False, "not a system agent")
|
|
559
|
+
|
|
560
|
+
# Extract version info from YAML frontmatter
|
|
561
|
+
import re
|
|
562
|
+
|
|
563
|
+
# Extract agent version from YAML
|
|
564
|
+
agent_version_match = re.search(r"^agent_version:\s*(\d+)", deployed_content, re.MULTILINE)
|
|
565
|
+
deployed_agent_version = int(agent_version_match.group(1)) if agent_version_match else 0
|
|
566
|
+
|
|
567
|
+
# Extract base agent version from YAML
|
|
568
|
+
base_version_match = re.search(r"^base_agent_version:\s*(\d+)", deployed_content, re.MULTILINE)
|
|
569
|
+
deployed_base_version = int(base_version_match.group(1)) if base_version_match else 0
|
|
570
|
+
|
|
571
|
+
# Read template to get current agent version
|
|
572
|
+
import json
|
|
573
|
+
template_data = json.loads(template_file.read_text())
|
|
574
|
+
current_agent_version = template_data.get('version', 0)
|
|
575
|
+
|
|
576
|
+
# Check if agent template version is newer
|
|
577
|
+
if current_agent_version > deployed_agent_version:
|
|
578
|
+
return (True, f"agent template updated (v{deployed_agent_version:04d} -> v{current_agent_version:04d})")
|
|
579
|
+
|
|
580
|
+
# Check if base agent version is newer
|
|
581
|
+
if current_base_version > deployed_base_version:
|
|
582
|
+
return (True, f"base agent updated (v{deployed_base_version:04d} -> v{current_base_version:04d})")
|
|
583
|
+
|
|
584
|
+
return (False, "up to date")
|
|
585
|
+
|
|
586
|
+
except Exception as e:
|
|
587
|
+
self.logger.warning(f"Error checking agent update status: {e}")
|
|
588
|
+
# On error, assume update is needed
|
|
589
|
+
return (True, "version check failed")
|
|
590
|
+
|
|
591
|
+
def clean_deployment(self, config_dir: Optional[Path] = None) -> Dict[str, Any]:
    """
    Clean up deployed agents.

    Removes only system agents (files whose frontmatter declares
    ``author: claude-mpm``); user-authored agents are left untouched.

    Args:
        config_dir: Claude configuration directory (default: .claude/)

    Returns:
        Cleanup results with "removed" and "errors" lists
    """
    import re

    if not config_dir:
        config_dir = Path.cwd() / ".claude"

    results: Dict[str, Any] = {
        "removed": [],
        "errors": []
    }

    agents_dir = config_dir / "agents"
    if not agents_dir.exists():
        results["errors"].append(f"Agents directory not found: {agents_dir}")
        return results

    # Accept unquoted, single-quoted and double-quoted author values;
    # the original substring check missed the double-quoted form, so
    # such system agents were never cleaned up. Compiled once, outside
    # the loop.
    author_pattern = re.compile(r"author:\s*['\"]?claude-mpm")

    # Remove system agents only (identified by claude-mpm author).
    for agent_file in agents_dir.glob("*.md"):
        try:
            content = agent_file.read_text()
            if author_pattern.search(content):
                agent_file.unlink()
                results["removed"].append(str(agent_file))
                self.logger.debug(f"Removed agent: {agent_file.name}")

        except Exception as e:
            error_msg = f"Failed to remove {agent_file.name}: {e}"
            self.logger.error(error_msg)
            results["errors"].append(error_msg)

    return results
|
|
633
|
+
|
|
634
|
+
def _extract_agent_metadata(self, template_content: str) -> Dict[str, Any]:
|
|
635
|
+
"""
|
|
636
|
+
Extract metadata from simplified agent template content.
|
|
637
|
+
|
|
638
|
+
Args:
|
|
639
|
+
template_content: Agent template markdown content
|
|
640
|
+
|
|
641
|
+
Returns:
|
|
642
|
+
Dictionary of extracted metadata
|
|
643
|
+
"""
|
|
644
|
+
metadata = {}
|
|
645
|
+
lines = template_content.split('\n')
|
|
646
|
+
|
|
647
|
+
# Extract sections based on the new simplified format
|
|
648
|
+
current_section = None
|
|
649
|
+
section_content = []
|
|
650
|
+
|
|
651
|
+
for line in lines:
|
|
652
|
+
line = line.strip()
|
|
653
|
+
|
|
654
|
+
if line.startswith('## When to Use'):
|
|
655
|
+
# Save previous section before starting new one
|
|
656
|
+
if current_section and section_content:
|
|
657
|
+
metadata[current_section] = section_content.copy()
|
|
658
|
+
current_section = 'when_to_use'
|
|
659
|
+
section_content = []
|
|
660
|
+
elif line.startswith('## Specialized Knowledge'):
|
|
661
|
+
# Save previous section before starting new one
|
|
662
|
+
if current_section and section_content:
|
|
663
|
+
metadata[current_section] = section_content.copy()
|
|
664
|
+
current_section = 'specialized_knowledge'
|
|
665
|
+
section_content = []
|
|
666
|
+
elif line.startswith('## Unique Capabilities'):
|
|
667
|
+
# Save previous section before starting new one
|
|
668
|
+
if current_section and section_content:
|
|
669
|
+
metadata[current_section] = section_content.copy()
|
|
670
|
+
current_section = 'unique_capabilities'
|
|
671
|
+
section_content = []
|
|
672
|
+
elif line.startswith('## ') or line.startswith('# '):
|
|
673
|
+
# End of section - save current section
|
|
674
|
+
if current_section and section_content:
|
|
675
|
+
metadata[current_section] = section_content.copy()
|
|
676
|
+
current_section = None
|
|
677
|
+
section_content = []
|
|
678
|
+
elif current_section and line.startswith('- '):
|
|
679
|
+
# Extract list item, removing the "- " prefix
|
|
680
|
+
item = line[2:].strip()
|
|
681
|
+
if item:
|
|
682
|
+
section_content.append(item)
|
|
683
|
+
|
|
684
|
+
# Handle last section if file ends without another header
|
|
685
|
+
if current_section and section_content:
|
|
686
|
+
metadata[current_section] = section_content.copy()
|
|
687
|
+
|
|
688
|
+
# Ensure all required fields have defaults
|
|
689
|
+
metadata.setdefault('when_to_use', [])
|
|
690
|
+
metadata.setdefault('specialized_knowledge', [])
|
|
691
|
+
metadata.setdefault('unique_capabilities', [])
|
|
692
|
+
|
|
693
|
+
return metadata
|
|
694
|
+
|
|
695
|
+
def _get_agent_tools(self, agent_name: str, metadata: Dict[str, Any]) -> List[str]:
|
|
696
|
+
"""
|
|
697
|
+
Get appropriate tools for an agent based on its type.
|
|
698
|
+
|
|
699
|
+
Args:
|
|
700
|
+
agent_name: Name of the agent
|
|
701
|
+
metadata: Agent metadata
|
|
702
|
+
|
|
703
|
+
Returns:
|
|
704
|
+
List of tool names
|
|
705
|
+
"""
|
|
706
|
+
# Base tools all agents should have
|
|
707
|
+
base_tools = [
|
|
708
|
+
"Read",
|
|
709
|
+
"Write",
|
|
710
|
+
"Edit",
|
|
711
|
+
"MultiEdit",
|
|
712
|
+
"Grep",
|
|
713
|
+
"Glob",
|
|
714
|
+
"LS",
|
|
715
|
+
"TodoWrite"
|
|
716
|
+
]
|
|
717
|
+
|
|
718
|
+
# Agent-specific tools
|
|
719
|
+
agent_tools = {
|
|
720
|
+
'engineer': base_tools + ["Bash", "WebSearch", "WebFetch"],
|
|
721
|
+
'qa': base_tools + ["Bash", "WebSearch"],
|
|
722
|
+
'documentation': base_tools + ["WebSearch", "WebFetch"],
|
|
723
|
+
'research': base_tools + ["WebSearch", "WebFetch", "Bash"],
|
|
724
|
+
'security': base_tools + ["Bash", "WebSearch", "Grep"],
|
|
725
|
+
'ops': base_tools + ["Bash", "WebSearch"],
|
|
726
|
+
'data_engineer': base_tools + ["Bash", "WebSearch"],
|
|
727
|
+
'version_control': base_tools + ["Bash"]
|
|
728
|
+
}
|
|
729
|
+
|
|
730
|
+
# Return specific tools or default set
|
|
731
|
+
return agent_tools.get(agent_name, base_tools + ["Bash", "WebSearch"])
|
|
732
|
+
|
|
733
|
+
def _format_yaml_list(self, items: List[str], indent: int) -> str:
|
|
734
|
+
"""
|
|
735
|
+
Format a list for YAML with proper indentation.
|
|
736
|
+
|
|
737
|
+
Args:
|
|
738
|
+
items: List of items
|
|
739
|
+
indent: Number of spaces to indent
|
|
740
|
+
|
|
741
|
+
Returns:
|
|
742
|
+
Formatted YAML list string
|
|
743
|
+
"""
|
|
744
|
+
if not items:
|
|
745
|
+
items = ["No items specified"]
|
|
746
|
+
|
|
747
|
+
indent_str = " " * indent
|
|
748
|
+
formatted_items = []
|
|
749
|
+
|
|
750
|
+
for item in items:
|
|
751
|
+
# Escape quotes in the item
|
|
752
|
+
item = item.replace('"', '\\"')
|
|
753
|
+
formatted_items.append(f'{indent_str}- "{item}"')
|
|
754
|
+
|
|
755
|
+
return '\n'.join(formatted_items)
|
|
756
|
+
|
|
757
|
+
def _get_agent_specific_config(self, agent_name: str) -> Dict[str, Any]:
|
|
758
|
+
"""
|
|
759
|
+
Get agent-specific configuration based on agent type.
|
|
760
|
+
|
|
761
|
+
Args:
|
|
762
|
+
agent_name: Name of the agent
|
|
763
|
+
|
|
764
|
+
Returns:
|
|
765
|
+
Dictionary of agent-specific configuration
|
|
766
|
+
"""
|
|
767
|
+
# Base configuration all agents share
|
|
768
|
+
base_config = {
|
|
769
|
+
'timeout': 600,
|
|
770
|
+
'max_tokens': 8192,
|
|
771
|
+
'memory_limit': 2048,
|
|
772
|
+
'cpu_limit': 50,
|
|
773
|
+
'network_access': True,
|
|
774
|
+
}
|
|
775
|
+
|
|
776
|
+
# Agent-specific configurations
|
|
777
|
+
configs = {
|
|
778
|
+
'engineer': {
|
|
779
|
+
**base_config,
|
|
780
|
+
'description': 'Code implementation, development, and inline documentation',
|
|
781
|
+
'tags': '["engineer", "development", "coding", "implementation"]',
|
|
782
|
+
'tools': '["Read", "Write", "Edit", "MultiEdit", "Bash", "Grep", "Glob", "LS", "WebSearch", "TodoWrite"]',
|
|
783
|
+
'temperature': 0.2,
|
|
784
|
+
'when_to_use': ['Code implementation needed', 'Bug fixes required', 'Refactoring tasks'],
|
|
785
|
+
'specialized_knowledge': ['Programming best practices', 'Design patterns', 'Code optimization'],
|
|
786
|
+
'unique_capabilities': ['Write production code', 'Debug complex issues', 'Refactor codebases'],
|
|
787
|
+
'primary_role': 'Code implementation and development',
|
|
788
|
+
'specializations': '["coding", "debugging", "refactoring", "optimization"]',
|
|
789
|
+
'authority': 'ALL code implementation decisions',
|
|
790
|
+
},
|
|
791
|
+
'qa': {
|
|
792
|
+
**base_config,
|
|
793
|
+
'description': 'Quality assurance, testing, and validation',
|
|
794
|
+
'tags': '["qa", "testing", "quality", "validation"]',
|
|
795
|
+
'tools': '["Read", "Write", "Edit", "Bash", "Grep", "Glob", "LS", "TodoWrite"]',
|
|
796
|
+
'temperature': 0.1,
|
|
797
|
+
'when_to_use': ['Testing needed', 'Quality validation', 'Test coverage analysis'],
|
|
798
|
+
'specialized_knowledge': ['Testing methodologies', 'Quality metrics', 'Test automation'],
|
|
799
|
+
'unique_capabilities': ['Execute test suites', 'Identify edge cases', 'Validate quality'],
|
|
800
|
+
'primary_role': 'Testing and quality assurance',
|
|
801
|
+
'specializations': '["testing", "validation", "quality-assurance", "coverage"]',
|
|
802
|
+
'authority': 'ALL testing and quality decisions',
|
|
803
|
+
},
|
|
804
|
+
'documentation': {
|
|
805
|
+
**base_config,
|
|
806
|
+
'description': 'Documentation creation, maintenance, and changelog generation',
|
|
807
|
+
'tags': '["documentation", "writing", "changelog", "docs"]',
|
|
808
|
+
'tools': '["Read", "Write", "Edit", "MultiEdit", "Grep", "Glob", "LS", "WebSearch", "TodoWrite"]',
|
|
809
|
+
'temperature': 0.3,
|
|
810
|
+
'when_to_use': ['Documentation updates needed', 'Changelog generation', 'README updates'],
|
|
811
|
+
'specialized_knowledge': ['Technical writing', 'Documentation standards', 'Semantic versioning'],
|
|
812
|
+
'unique_capabilities': ['Create clear documentation', 'Generate changelogs', 'Maintain docs'],
|
|
813
|
+
'primary_role': 'Documentation and technical writing',
|
|
814
|
+
'specializations': '["technical-writing", "changelog", "api-docs", "guides"]',
|
|
815
|
+
'authority': 'ALL documentation decisions',
|
|
816
|
+
},
|
|
817
|
+
'research': {
|
|
818
|
+
**base_config,
|
|
819
|
+
'description': 'Technical research, analysis, and investigation',
|
|
820
|
+
'tags': '["research", "analysis", "investigation", "evaluation"]',
|
|
821
|
+
'tools': '["Read", "Grep", "Glob", "LS", "WebSearch", "WebFetch", "TodoWrite"]',
|
|
822
|
+
'temperature': 0.4,
|
|
823
|
+
'when_to_use': ['Technical research needed', 'Solution evaluation', 'Best practices investigation'],
|
|
824
|
+
'specialized_knowledge': ['Research methodologies', 'Technical analysis', 'Evaluation frameworks'],
|
|
825
|
+
'unique_capabilities': ['Deep investigation', 'Comparative analysis', 'Evidence-based recommendations'],
|
|
826
|
+
'primary_role': 'Research and technical analysis',
|
|
827
|
+
'specializations': '["investigation", "analysis", "evaluation", "recommendations"]',
|
|
828
|
+
'authority': 'ALL research decisions',
|
|
829
|
+
},
|
|
830
|
+
'security': {
|
|
831
|
+
**base_config,
|
|
832
|
+
'description': 'Security analysis, vulnerability assessment, and protection',
|
|
833
|
+
'tags': '["security", "vulnerability", "protection", "audit"]',
|
|
834
|
+
'tools': '["Read", "Grep", "Glob", "LS", "Bash", "WebSearch", "TodoWrite"]',
|
|
835
|
+
'temperature': 0.1,
|
|
836
|
+
'when_to_use': ['Security review needed', 'Vulnerability assessment', 'Security audit'],
|
|
837
|
+
'specialized_knowledge': ['Security best practices', 'OWASP guidelines', 'Vulnerability patterns'],
|
|
838
|
+
'unique_capabilities': ['Identify vulnerabilities', 'Security auditing', 'Threat modeling'],
|
|
839
|
+
'primary_role': 'Security analysis and protection',
|
|
840
|
+
'specializations': '["vulnerability-assessment", "security-audit", "threat-modeling", "protection"]',
|
|
841
|
+
'authority': 'ALL security decisions',
|
|
842
|
+
},
|
|
843
|
+
'ops': {
|
|
844
|
+
**base_config,
|
|
845
|
+
'description': 'Deployment, operations, and infrastructure management',
|
|
846
|
+
'tags': '["ops", "deployment", "infrastructure", "devops"]',
|
|
847
|
+
'tools': '["Read", "Write", "Edit", "Bash", "Grep", "Glob", "LS", "TodoWrite"]',
|
|
848
|
+
'temperature': 0.2,
|
|
849
|
+
'when_to_use': ['Deployment configuration', 'Infrastructure setup', 'CI/CD pipeline work'],
|
|
850
|
+
'specialized_knowledge': ['Deployment best practices', 'Infrastructure as code', 'CI/CD'],
|
|
851
|
+
'unique_capabilities': ['Configure deployments', 'Manage infrastructure', 'Automate operations'],
|
|
852
|
+
'primary_role': 'Operations and deployment management',
|
|
853
|
+
'specializations': '["deployment", "infrastructure", "automation", "monitoring"]',
|
|
854
|
+
'authority': 'ALL operations decisions',
|
|
855
|
+
},
|
|
856
|
+
'data_engineer': {
|
|
857
|
+
**base_config,
|
|
858
|
+
'description': 'Data pipeline management and AI API integrations',
|
|
859
|
+
'tags': '["data", "pipeline", "etl", "ai-integration"]',
|
|
860
|
+
'tools': '["Read", "Write", "Edit", "Bash", "Grep", "Glob", "LS", "WebSearch", "TodoWrite"]',
|
|
861
|
+
'temperature': 0.2,
|
|
862
|
+
'when_to_use': ['Data pipeline setup', 'Database design', 'AI API integration'],
|
|
863
|
+
'specialized_knowledge': ['Data architectures', 'ETL processes', 'AI/ML APIs'],
|
|
864
|
+
'unique_capabilities': ['Design data schemas', 'Build pipelines', 'Integrate AI services'],
|
|
865
|
+
'primary_role': 'Data engineering and AI integration',
|
|
866
|
+
'specializations': '["data-pipelines", "etl", "database", "ai-integration"]',
|
|
867
|
+
'authority': 'ALL data engineering decisions',
|
|
868
|
+
},
|
|
869
|
+
'version_control': {
|
|
870
|
+
**base_config,
|
|
871
|
+
'description': 'Git operations, version management, and release coordination',
|
|
872
|
+
'tags': '["git", "version-control", "release", "branching"]',
|
|
873
|
+
'tools': '["Read", "Bash", "Grep", "Glob", "LS", "TodoWrite"]',
|
|
874
|
+
'temperature': 0.1,
|
|
875
|
+
'network_access': False, # Git operations are local
|
|
876
|
+
'when_to_use': ['Git operations needed', 'Version bumping', 'Release management'],
|
|
877
|
+
'specialized_knowledge': ['Git workflows', 'Semantic versioning', 'Release processes'],
|
|
878
|
+
'unique_capabilities': ['Complex git operations', 'Version management', 'Release coordination'],
|
|
879
|
+
'primary_role': 'Version control and release management',
|
|
880
|
+
'specializations': '["git", "versioning", "branching", "releases"]',
|
|
881
|
+
'authority': 'ALL version control decisions',
|
|
882
|
+
}
|
|
883
|
+
}
|
|
884
|
+
|
|
885
|
+
# Return the specific config or a default
|
|
886
|
+
return configs.get(agent_name, {
|
|
887
|
+
**base_config,
|
|
888
|
+
'description': f'{agent_name.title()} agent for specialized tasks',
|
|
889
|
+
'tags': f'["{agent_name}", "specialized", "mpm"]',
|
|
890
|
+
'tools': '["Read", "Write", "Edit", "Grep", "Glob", "LS", "TodoWrite"]',
|
|
891
|
+
'temperature': 0.3,
|
|
892
|
+
'when_to_use': [f'When {agent_name} expertise is needed'],
|
|
893
|
+
'specialized_knowledge': [f'{agent_name.title()} domain knowledge'],
|
|
894
|
+
'unique_capabilities': [f'{agent_name.title()} specialized operations'],
|
|
895
|
+
'primary_role': f'{agent_name.title()} operations',
|
|
896
|
+
'specializations': f'["{agent_name}"]',
|
|
897
|
+
'authority': f'ALL {agent_name} decisions',
|
|
898
|
+
})
|
|
899
|
+
|
|
900
|
+
def _deploy_system_instructions(self, target_dir: Path, force_rebuild: bool, results: Dict[str, Any]) -> None:
|
|
901
|
+
"""
|
|
902
|
+
Deploy system instructions for PM framework.
|
|
903
|
+
|
|
904
|
+
Args:
|
|
905
|
+
target_dir: Target directory for deployment
|
|
906
|
+
force_rebuild: Force rebuild even if exists
|
|
907
|
+
results: Results dictionary to update
|
|
908
|
+
"""
|
|
909
|
+
try:
|
|
910
|
+
# Find the INSTRUCTIONS.md file
|
|
911
|
+
module_path = Path(__file__).parent.parent
|
|
912
|
+
instructions_path = module_path / "agents" / "INSTRUCTIONS.md"
|
|
913
|
+
|
|
914
|
+
if not instructions_path.exists():
|
|
915
|
+
self.logger.warning(f"System instructions not found: {instructions_path}")
|
|
916
|
+
return
|
|
917
|
+
|
|
918
|
+
# Target file for system instructions - use CLAUDE.md in user's home .claude directory
|
|
919
|
+
target_file = Path("~/.claude/CLAUDE.md").expanduser()
|
|
920
|
+
|
|
921
|
+
# Ensure .claude directory exists
|
|
922
|
+
target_file.parent.mkdir(exist_ok=True)
|
|
923
|
+
|
|
924
|
+
# Check if update needed
|
|
925
|
+
if not force_rebuild and target_file.exists():
|
|
926
|
+
# Compare modification times
|
|
927
|
+
if target_file.stat().st_mtime >= instructions_path.stat().st_mtime:
|
|
928
|
+
results["skipped"].append("CLAUDE.md")
|
|
929
|
+
self.logger.debug("System instructions up to date")
|
|
930
|
+
return
|
|
931
|
+
|
|
932
|
+
# Read and deploy system instructions
|
|
933
|
+
instructions_content = instructions_path.read_text()
|
|
934
|
+
target_file.write_text(instructions_content)
|
|
935
|
+
|
|
936
|
+
is_update = target_file.exists()
|
|
937
|
+
if is_update:
|
|
938
|
+
results["updated"].append({
|
|
939
|
+
"name": "CLAUDE.md",
|
|
940
|
+
"template": str(instructions_path),
|
|
941
|
+
"target": str(target_file)
|
|
942
|
+
})
|
|
943
|
+
self.logger.info("Updated system instructions")
|
|
944
|
+
else:
|
|
945
|
+
results["deployed"].append({
|
|
946
|
+
"name": "CLAUDE.md",
|
|
947
|
+
"template": str(instructions_path),
|
|
948
|
+
"target": str(target_file)
|
|
949
|
+
})
|
|
950
|
+
self.logger.info("Deployed system instructions")
|
|
951
|
+
|
|
952
|
+
except Exception as e:
|
|
953
|
+
error_msg = f"Failed to deploy system instructions: {e}"
|
|
954
|
+
self.logger.error(error_msg)
|
|
955
|
+
results["errors"].append(error_msg)
|