claude-mpm 4.4.0__py3-none-any.whl → 4.4.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/WORKFLOW.md +2 -14
- claude_mpm/agents/agent_loader.py +3 -2
- claude_mpm/agents/agent_loader_integration.py +2 -1
- claude_mpm/agents/async_agent_loader.py +2 -2
- claude_mpm/agents/base_agent_loader.py +2 -2
- claude_mpm/agents/frontmatter_validator.py +1 -0
- claude_mpm/agents/system_agent_config.py +2 -1
- claude_mpm/cli/commands/configure.py +2 -29
- claude_mpm/cli/commands/doctor.py +44 -5
- claude_mpm/cli/commands/mpm_init.py +117 -63
- claude_mpm/cli/parsers/configure_parser.py +6 -15
- claude_mpm/cli/startup_logging.py +1 -3
- claude_mpm/config/agent_config.py +1 -1
- claude_mpm/config/paths.py +2 -1
- claude_mpm/core/agent_name_normalizer.py +1 -0
- claude_mpm/core/config.py +2 -1
- claude_mpm/core/config_aliases.py +2 -1
- claude_mpm/core/file_utils.py +0 -1
- claude_mpm/core/framework/__init__.py +38 -0
- claude_mpm/core/framework/formatters/__init__.py +11 -0
- claude_mpm/core/framework/formatters/capability_generator.py +367 -0
- claude_mpm/core/framework/formatters/content_formatter.py +288 -0
- claude_mpm/core/framework/formatters/context_generator.py +184 -0
- claude_mpm/core/framework/loaders/__init__.py +13 -0
- claude_mpm/core/framework/loaders/agent_loader.py +206 -0
- claude_mpm/core/framework/loaders/file_loader.py +223 -0
- claude_mpm/core/framework/loaders/instruction_loader.py +161 -0
- claude_mpm/core/framework/loaders/packaged_loader.py +232 -0
- claude_mpm/core/framework/processors/__init__.py +11 -0
- claude_mpm/core/framework/processors/memory_processor.py +230 -0
- claude_mpm/core/framework/processors/metadata_processor.py +146 -0
- claude_mpm/core/framework/processors/template_processor.py +244 -0
- claude_mpm/core/framework_loader.py +298 -1795
- claude_mpm/core/log_manager.py +2 -1
- claude_mpm/core/tool_access_control.py +1 -0
- claude_mpm/core/unified_agent_registry.py +2 -1
- claude_mpm/core/unified_paths.py +1 -0
- claude_mpm/experimental/cli_enhancements.py +1 -0
- claude_mpm/hooks/__init__.py +9 -1
- claude_mpm/hooks/base_hook.py +1 -0
- claude_mpm/hooks/instruction_reinforcement.py +1 -0
- claude_mpm/hooks/kuzu_memory_hook.py +359 -0
- claude_mpm/hooks/validation_hooks.py +1 -1
- claude_mpm/scripts/mpm_doctor.py +1 -0
- claude_mpm/services/agents/loading/agent_profile_loader.py +1 -1
- claude_mpm/services/agents/loading/base_agent_manager.py +1 -1
- claude_mpm/services/agents/loading/framework_agent_loader.py +1 -1
- claude_mpm/services/agents/management/agent_capabilities_generator.py +1 -0
- claude_mpm/services/agents/management/agent_management_service.py +1 -1
- claude_mpm/services/agents/memory/memory_categorization_service.py +0 -1
- claude_mpm/services/agents/memory/memory_file_service.py +6 -2
- claude_mpm/services/agents/memory/memory_format_service.py +0 -1
- claude_mpm/services/agents/registry/deployed_agent_discovery.py +1 -1
- claude_mpm/services/async_session_logger.py +1 -1
- claude_mpm/services/claude_session_logger.py +1 -0
- claude_mpm/services/core/path_resolver.py +2 -0
- claude_mpm/services/diagnostics/checks/__init__.py +2 -0
- claude_mpm/services/diagnostics/checks/installation_check.py +126 -25
- claude_mpm/services/diagnostics/checks/mcp_services_check.py +399 -0
- claude_mpm/services/diagnostics/diagnostic_runner.py +4 -0
- claude_mpm/services/diagnostics/doctor_reporter.py +259 -32
- claude_mpm/services/event_bus/direct_relay.py +2 -1
- claude_mpm/services/event_bus/event_bus.py +1 -0
- claude_mpm/services/event_bus/relay.py +3 -2
- claude_mpm/services/framework_claude_md_generator/content_assembler.py +1 -1
- claude_mpm/services/infrastructure/daemon_manager.py +1 -1
- claude_mpm/services/mcp_config_manager.py +67 -4
- claude_mpm/services/mcp_gateway/core/process_pool.py +320 -0
- claude_mpm/services/mcp_gateway/core/startup_verification.py +2 -2
- claude_mpm/services/mcp_gateway/main.py +3 -13
- claude_mpm/services/mcp_gateway/server/stdio_server.py +4 -10
- claude_mpm/services/mcp_gateway/tools/__init__.py +14 -2
- claude_mpm/services/mcp_gateway/tools/external_mcp_services.py +38 -6
- claude_mpm/services/mcp_gateway/tools/kuzu_memory_service.py +527 -0
- claude_mpm/services/memory/cache/simple_cache.py +1 -1
- claude_mpm/services/project/archive_manager.py +159 -96
- claude_mpm/services/project/documentation_manager.py +64 -45
- claude_mpm/services/project/enhanced_analyzer.py +132 -89
- claude_mpm/services/project/project_organizer.py +225 -131
- claude_mpm/services/response_tracker.py +1 -1
- claude_mpm/services/shared/__init__.py +2 -1
- claude_mpm/services/shared/service_factory.py +8 -5
- claude_mpm/services/socketio/server/eventbus_integration.py +1 -1
- claude_mpm/services/unified/__init__.py +1 -1
- claude_mpm/services/unified/analyzer_strategies/__init__.py +3 -3
- claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +97 -53
- claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +81 -40
- claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +277 -178
- claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +196 -112
- claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +83 -49
- claude_mpm/services/unified/config_strategies/__init__.py +175 -0
- claude_mpm/services/unified/config_strategies/config_schema.py +735 -0
- claude_mpm/services/unified/config_strategies/context_strategy.py +750 -0
- claude_mpm/services/unified/config_strategies/error_handling_strategy.py +1009 -0
- claude_mpm/services/unified/config_strategies/file_loader_strategy.py +879 -0
- claude_mpm/services/unified/config_strategies/unified_config_service.py +814 -0
- claude_mpm/services/unified/config_strategies/validation_strategy.py +1144 -0
- claude_mpm/services/unified/deployment_strategies/__init__.py +7 -7
- claude_mpm/services/unified/deployment_strategies/base.py +24 -28
- claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +168 -88
- claude_mpm/services/unified/deployment_strategies/local.py +49 -34
- claude_mpm/services/unified/deployment_strategies/utils.py +39 -43
- claude_mpm/services/unified/deployment_strategies/vercel.py +30 -24
- claude_mpm/services/unified/interfaces.py +0 -26
- claude_mpm/services/unified/migration.py +17 -40
- claude_mpm/services/unified/strategies.py +9 -26
- claude_mpm/services/unified/unified_analyzer.py +48 -44
- claude_mpm/services/unified/unified_config.py +21 -19
- claude_mpm/services/unified/unified_deployment.py +21 -26
- claude_mpm/storage/state_storage.py +1 -0
- claude_mpm/utils/agent_dependency_loader.py +18 -6
- claude_mpm/utils/common.py +14 -12
- claude_mpm/utils/database_connector.py +15 -12
- claude_mpm/utils/error_handler.py +1 -0
- claude_mpm/utils/log_cleanup.py +1 -0
- claude_mpm/utils/path_operations.py +1 -0
- claude_mpm/utils/session_logging.py +1 -1
- claude_mpm/utils/subprocess_utils.py +1 -0
- claude_mpm/validation/agent_validator.py +1 -1
- {claude_mpm-4.4.0.dist-info → claude_mpm-4.4.4.dist-info}/METADATA +23 -17
- {claude_mpm-4.4.0.dist-info → claude_mpm-4.4.4.dist-info}/RECORD +126 -105
- claude_mpm/cli/commands/configure_tui.py +0 -1927
- claude_mpm/services/mcp_gateway/tools/ticket_tools.py +0 -645
- claude_mpm/services/mcp_gateway/tools/unified_ticket_tool.py +0 -602
- {claude_mpm-4.4.0.dist-info → claude_mpm-4.4.4.dist-info}/WHEEL +0 -0
- {claude_mpm-4.4.0.dist-info → claude_mpm-4.4.4.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.4.0.dist-info → claude_mpm-4.4.4.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.4.0.dist-info → claude_mpm-4.4.4.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,230 @@
|
|
1
|
+
"""Memory content processor for framework memory management."""
|
2
|
+
|
3
|
+
from pathlib import Path
|
4
|
+
from typing import Any, Dict, Set
|
5
|
+
|
6
|
+
from claude_mpm.core.logging_utils import get_logger
|
7
|
+
|
8
|
+
|
9
|
+
class MemoryProcessor:
    """Processes and manages memory content for agents.

    Memory files live under ``.claude-mpm/memories`` in the project
    directory (highest priority) and in the user's home directory
    (fallback). This class loads, aggregates, deduplicates, formats,
    and migrates that content.
    """

    def __init__(self):
        """Initialize the memory processor with its own logger."""
        self.logger = get_logger("memory_processor")

    def load_pm_memories(self) -> Dict[str, str]:
        """Load PM memories from various locations.

        Project-level memories take precedence over user-level ones;
        the first file that can be read wins.

        Returns:
            Dictionary with ``actual_memories`` content and a
            ``memory_source`` of "project" or "user"; empty if no
            memory file was readable.
        """
        memories: Dict[str, str] = {}

        # Search order encodes precedence: project first, then user.
        candidates = [
            (
                Path.cwd() / ".claude-mpm" / "memories" / "PM_memories.md",
                "project",
            ),
            (
                Path.home() / ".claude-mpm" / "memories" / "PM_memories.md",
                "user",
            ),
        ]

        for memory_file, source in candidates:
            if not memory_file.exists():
                continue
            try:
                memories["actual_memories"] = memory_file.read_text()
                memories["memory_source"] = source
                self.logger.info(
                    f"Loaded PM memories from {source}: {memory_file}"
                )
                return memories
            except Exception as e:
                # A failed read falls through to the next candidate.
                self.logger.error(f"Failed to load {source} PM memories: {e}")

        return memories

    def load_agent_memories(self, deployed_agents: Set[str]) -> Dict[str, str]:
        """Load memories for deployed agents.

        Args:
            deployed_agents: Set of deployed agent names.

        Returns:
            Dictionary mapping agent names to their memory content.
        """
        agent_memories: Dict[str, str] = {}

        # Project memories shadow user memories for the same agent.
        memory_locations = [
            Path.cwd() / ".claude-mpm" / "memories",  # Project memories
            Path.home() / ".claude-mpm" / "memories",  # User memories
        ]

        for agent_name in deployed_agents:
            memory_filename = f"{agent_name}_memories.md"

            for memory_dir in memory_locations:
                memory_file = memory_dir / memory_filename
                if not memory_file.exists():
                    continue
                try:
                    agent_memories[agent_name] = memory_file.read_text()
                    self.logger.debug(
                        f"Loaded memories for {agent_name} from {memory_file}"
                    )
                    break  # Use first found (project > user)
                except Exception as e:
                    # Keep searching lower-priority locations on failure.
                    self.logger.error(
                        f"Failed to load memories for {agent_name}: {e}"
                    )

        return agent_memories

    def aggregate_memories(
        self,
        pm_memories: Dict[str, str],
        agent_memories: Dict[str, str],
    ) -> Dict[str, Any]:
        """Aggregate all memories into a single structure.

        Args:
            pm_memories: PM memory content (see :meth:`load_pm_memories`).
            agent_memories: Agent-specific memories.

        Returns:
            Aggregated memory structure; keys are omitted when empty.
        """
        result: Dict[str, Any] = {}

        # Add PM memories
        if pm_memories.get("actual_memories"):
            result["actual_memories"] = pm_memories["actual_memories"]
            result["memory_source"] = pm_memories.get("memory_source", "unknown")

        # Add agent memories
        if agent_memories:
            result["agent_memories"] = agent_memories

        return result

    def format_memory_section(self, memories: Dict[str, Any]) -> str:
        """Format memories into a section for instructions.

        Args:
            memories: Memory content dictionary (see :meth:`aggregate_memories`).

        Returns:
            Formatted markdown memory section; empty string when there
            is nothing to show.
        """
        sections = []

        # Format PM memories
        if memories.get("actual_memories"):
            sections.append("\n\n## Current PM Memories\n\n")
            sections.append(
                "**The following are your accumulated memories and knowledge from this project:**\n\n"
            )
            sections.append(memories["actual_memories"])
            sections.append("\n")

        # Format agent memories (sorted for stable output)
        agent_memories = memories.get("agent_memories")
        if agent_memories:
            sections.append("\n\n## Agent Memories\n\n")
            sections.append(
                "**The following are accumulated memories from specialized agents:**\n\n"
            )

            for agent_name in sorted(agent_memories.keys()):
                memory_content = agent_memories[agent_name]
                if memory_content:
                    formatted_name = agent_name.replace("_", " ").title()
                    sections.append(f"### {formatted_name} Agent Memory\n\n")
                    sections.append(memory_content)
                    sections.append("\n\n")

        return "".join(sections)

    def deduplicate_memories(self, memories: Dict[str, str]) -> Dict[str, str]:
        """Remove duplicate entries from memories.

        Duplicate non-blank lines are dropped while preserving original
        order; blank lines are always kept. Keys with empty content are
        omitted from the result.

        Args:
            memories: Raw memory content keyed by name.

        Returns:
            Deduplicated memories.
        """
        deduplicated: Dict[str, str] = {}

        for key, content in memories.items():
            if not content:
                continue

            seen: Set[str] = set()
            unique_lines = []
            for line in content.split("\n"):
                # Blank lines are kept verbatim and never deduplicated.
                if not line.strip():
                    unique_lines.append(line)
                    continue
                if line not in seen:
                    seen.add(line)
                    unique_lines.append(line)

            deduplicated[key] = "\n".join(unique_lines)

        return deduplicated

    def migrate_legacy_memories(self) -> bool:
        """Migrate memories from old .claude/ locations to new .claude-mpm/ locations.

        Only copies when the legacy file exists and the new file does
        not, so an existing target is never overwritten.

        Returns:
            True if any migrations were performed.
        """
        migrated = False

        # (old, new) path pairs for project- and user-level memories.
        migrations = [
            (
                Path.cwd() / ".claude" / "memories" / "PM_memories.md",
                Path.cwd() / ".claude-mpm" / "memories" / "PM_memories.md",
            ),
            (
                Path.home() / ".claude" / "memories" / "PM_memories.md",
                Path.home() / ".claude-mpm" / "memories" / "PM_memories.md",
            ),
        ]

        for old_path, new_path in migrations:
            if old_path.exists() and not new_path.exists():
                try:
                    # Create new directory if needed, then copy content.
                    new_path.parent.mkdir(parents=True, exist_ok=True)
                    new_path.write_text(old_path.read_text())

                    self.logger.info(f"Migrated memories from {old_path} to {new_path}")
                    migrated = True
                except Exception as e:
                    self.logger.error(
                        f"Failed to migrate memories from {old_path}: {e}"
                    )

        return migrated
@@ -0,0 +1,146 @@
|
|
1
|
+
"""Metadata extraction and processing for framework files."""
|
2
|
+
|
3
|
+
import re
|
4
|
+
import time
|
5
|
+
from pathlib import Path
|
6
|
+
from typing import Any, Dict, Optional, Tuple
|
7
|
+
|
8
|
+
import yaml
|
9
|
+
|
10
|
+
from claude_mpm.core.logging_utils import get_logger
|
11
|
+
|
12
|
+
|
13
|
+
class MetadataProcessor:
    """Processes and extracts metadata from framework files and agents."""

    def __init__(self):
        """Initialize the metadata processor."""
        self.logger = get_logger("metadata_processor")

    def extract_metadata_from_content(self, content: str) -> Dict[str, Optional[str]]:
        """Extract framework metadata markers embedded in a content string.

        Args:
            content: Content to extract metadata from.

        Returns:
            Dictionary with ``version`` and ``last_modified`` keys;
            values are None when the corresponding marker is absent.
        """
        found_version: Optional[str] = None
        found_modified: Optional[str] = None

        version_hit = re.search(r"<!-- FRAMEWORK_VERSION: (\d+) -->", content)
        if version_hit:
            found_version = version_hit.group(1)
            self.logger.debug(f"Extracted version: {found_version}")

        modified_hit = re.search(r"<!-- LAST_MODIFIED: ([^>]+) -->", content)
        if modified_hit:
            found_modified = modified_hit.group(1).strip()
            self.logger.debug(f"Extracted last_modified: {found_modified}")

        return {
            "version": found_version,
            "last_modified": found_modified,
        }

    def parse_agent_metadata(self, agent_file: Path) -> Optional[Dict[str, Any]]:
        """Parse agent metadata from a deployed agent file.

        Args:
            agent_file: Path to deployed agent file.

        Returns:
            Dictionary with agent metadata, or None on any read/parse error.
        """
        try:
            with open(agent_file) as f:
                content = f.read()

            stem = agent_file.stem
            # Sensible defaults derived from the filename.
            agent_data: Dict[str, Any] = {
                "id": stem,
                "display_name": stem.replace("_", " ").replace("-", " ").title(),
                "description": "Specialized agent",
                "file_path": str(agent_file),
                "file_mtime": agent_file.stat().st_mtime,
            }

            # Merge YAML frontmatter when the file opens with a "---" fence.
            if content.startswith("---"):
                fence_end = content.find("---", 3)
                if fence_end > 0:
                    parsed = yaml.safe_load(content[3:fence_end])
                    if parsed:
                        # "name" doubles as the agent ID for the Task tool.
                        agent_data["id"] = parsed.get("name", agent_data["id"])
                        agent_data["display_name"] = (
                            parsed.get("name", agent_data["display_name"])
                            .replace("-", " ")
                            .title()
                        )

                        # Copy every other frontmatter field verbatim.
                        for key, value in parsed.items():
                            if key != "name":
                                agent_data[key] = value

            # IMPORTANT: Do NOT add spaces to tools field - it breaks deployment!
            # Tools must remain as comma-separated without spaces: "Read,Write,Edit"

            return agent_data

        except Exception as e:
            self.logger.debug(f"Could not parse metadata from {agent_file}: {e}")
            return None

    def extract_cache_metadata(self, data: Any, cache_key: str) -> Tuple[Any, float]:
        """Pair data with the current timestamp for cache storage.

        Args:
            data: Data to cache.
            cache_key: Cache key for identification (not used in the pairing).

        Returns:
            Tuple of (data, timestamp) for cache storage.
        """
        return data, time.time()

    def validate_cache_metadata(
        self,
        cached_data: Tuple[Any, float],
        file_path: Optional[Path] = None,
        ttl: float = 60.0,
    ) -> bool:
        """Check whether a cached entry is still fresh.

        Args:
            cached_data: Tuple of (data, timestamp) from cache.
            file_path: Optional file whose newer mtime invalidates the cache.
            ttl: Time-to-live in seconds.

        Returns:
            True if cache is valid, False otherwise (including on any error).
        """
        try:
            _, cached_at = cached_data

            # Expired by age?
            if (time.time() - cached_at) > ttl:
                return False

            # Invalidated by a newer file on disk?
            if file_path is not None and file_path.exists():
                return not (file_path.stat().st_mtime > cached_at)

            return True
        except Exception as e:
            self.logger.debug(f"Cache validation failed: {e}")
            return False
@@ -0,0 +1,244 @@
|
|
1
|
+
"""JSON template processor for agent configurations."""
|
2
|
+
|
3
|
+
import json
|
4
|
+
from pathlib import Path
|
5
|
+
from typing import Any, Dict, List, Optional
|
6
|
+
|
7
|
+
from claude_mpm.core.logging_utils import get_logger
|
8
|
+
|
9
|
+
# Import resource handling for packaged installations
|
10
|
+
try:
|
11
|
+
from importlib.resources import files
|
12
|
+
except ImportError:
|
13
|
+
try:
|
14
|
+
from importlib_resources import files
|
15
|
+
except ImportError:
|
16
|
+
files = None
|
17
|
+
|
18
|
+
|
19
|
+
class TemplateProcessor:
|
20
|
+
"""Processes JSON template files for agent configurations."""
|
21
|
+
|
22
|
+
def __init__(self, framework_path: Optional[Path] = None):
|
23
|
+
"""Initialize the template processor.
|
24
|
+
|
25
|
+
Args:
|
26
|
+
framework_path: Path to framework installation
|
27
|
+
"""
|
28
|
+
self.logger = get_logger("template_processor")
|
29
|
+
self.framework_path = framework_path
|
30
|
+
|
31
|
+
def load_template(self, agent_name: str) -> Optional[Dict[str, Any]]:
|
32
|
+
"""Load JSON template for an agent.
|
33
|
+
|
34
|
+
Args:
|
35
|
+
agent_name: Name of the agent
|
36
|
+
|
37
|
+
Returns:
|
38
|
+
Template data or None if not found
|
39
|
+
"""
|
40
|
+
try:
|
41
|
+
# Check if we have a framework path
|
42
|
+
if not self.framework_path or self.framework_path == Path("__PACKAGED__"):
|
43
|
+
return self._load_packaged_template(agent_name)
|
44
|
+
|
45
|
+
# For development mode, load from filesystem
|
46
|
+
return self._load_filesystem_template(agent_name)
|
47
|
+
|
48
|
+
except Exception as e:
|
49
|
+
self.logger.debug(f"Could not load template for {agent_name}: {e}")
|
50
|
+
return None
|
51
|
+
|
52
|
+
def _load_packaged_template(self, agent_name: str) -> Optional[Dict[str, Any]]:
|
53
|
+
"""Load template from packaged installation.
|
54
|
+
|
55
|
+
Args:
|
56
|
+
agent_name: Name of the agent
|
57
|
+
|
58
|
+
Returns:
|
59
|
+
Template data or None if not found
|
60
|
+
"""
|
61
|
+
if not files:
|
62
|
+
return None
|
63
|
+
|
64
|
+
try:
|
65
|
+
templates_package = files("claude_mpm.agents.templates")
|
66
|
+
template_file = templates_package / f"{agent_name}.json"
|
67
|
+
|
68
|
+
if template_file.is_file():
|
69
|
+
template_content = template_file.read_text()
|
70
|
+
return json.loads(template_content)
|
71
|
+
except Exception as e:
|
72
|
+
self.logger.debug(f"Could not load packaged template for {agent_name}: {e}")
|
73
|
+
|
74
|
+
return None
|
75
|
+
|
76
|
+
def _load_filesystem_template(self, agent_name: str) -> Optional[Dict[str, Any]]:
|
77
|
+
"""Load template from filesystem.
|
78
|
+
|
79
|
+
Args:
|
80
|
+
agent_name: Name of the agent
|
81
|
+
|
82
|
+
Returns:
|
83
|
+
Template data or None if not found
|
84
|
+
"""
|
85
|
+
templates_dir = (
|
86
|
+
self.framework_path / "src" / "claude_mpm" / "agents" / "templates"
|
87
|
+
)
|
88
|
+
|
89
|
+
# Try exact match first
|
90
|
+
template_file = templates_dir / f"{agent_name}.json"
|
91
|
+
if template_file.exists():
|
92
|
+
with open(template_file) as f:
|
93
|
+
return json.load(f)
|
94
|
+
|
95
|
+
# Try alternative naming variations
|
96
|
+
alternative_names = self._get_alternative_names(agent_name)
|
97
|
+
for alt_name in alternative_names:
|
98
|
+
alt_file = templates_dir / f"{alt_name}.json"
|
99
|
+
if alt_file.exists():
|
100
|
+
with open(alt_file) as f:
|
101
|
+
return json.load(f)
|
102
|
+
|
103
|
+
return None
|
104
|
+
|
105
|
+
def _get_alternative_names(self, agent_name: str) -> List[str]:
|
106
|
+
"""Get alternative naming variations for an agent.
|
107
|
+
|
108
|
+
Args:
|
109
|
+
agent_name: Original agent name
|
110
|
+
|
111
|
+
Returns:
|
112
|
+
List of alternative names to try
|
113
|
+
"""
|
114
|
+
# Remove duplicates by using a set
|
115
|
+
return list(
|
116
|
+
{
|
117
|
+
agent_name.replace("-", "_"), # api-qa -> api_qa
|
118
|
+
agent_name.replace("_", "-"), # api_qa -> api-qa
|
119
|
+
agent_name.replace("-", ""), # api-qa -> apiqa
|
120
|
+
agent_name.replace("_", ""), # api_qa -> apiqa
|
121
|
+
agent_name.replace("-agent", ""), # research-agent -> research
|
122
|
+
agent_name.replace("_agent", ""), # research_agent -> research
|
123
|
+
agent_name + "_agent", # research -> research_agent
|
124
|
+
agent_name + "-agent", # research -> research-agent
|
125
|
+
}
|
126
|
+
)
|
127
|
+
|
128
|
+
def extract_routing(
|
129
|
+
self, template_data: Dict[str, Any]
|
130
|
+
) -> Optional[Dict[str, Any]]:
|
131
|
+
"""Extract routing information from template.
|
132
|
+
|
133
|
+
Args:
|
134
|
+
template_data: Template data
|
135
|
+
|
136
|
+
Returns:
|
137
|
+
Routing information or None
|
138
|
+
"""
|
139
|
+
return template_data.get("routing")
|
140
|
+
|
141
|
+
def extract_memory_routing(
|
142
|
+
self, template_data: Dict[str, Any]
|
143
|
+
) -> Optional[Dict[str, Any]]:
|
144
|
+
"""Extract memory routing information from template.
|
145
|
+
|
146
|
+
Args:
|
147
|
+
template_data: Template data
|
148
|
+
|
149
|
+
Returns:
|
150
|
+
Memory routing information or None
|
151
|
+
"""
|
152
|
+
return template_data.get("memory_routing")
|
153
|
+
|
154
|
+
def extract_tools(self, template_data: Dict[str, Any]) -> str:
|
155
|
+
"""Extract tools string from template data.
|
156
|
+
|
157
|
+
Args:
|
158
|
+
template_data: Template data
|
159
|
+
|
160
|
+
Returns:
|
161
|
+
Tools string for display
|
162
|
+
"""
|
163
|
+
capabilities = template_data.get("capabilities", {})
|
164
|
+
tools = capabilities.get("tools", "*")
|
165
|
+
|
166
|
+
if tools == "*":
|
167
|
+
return "All Tools"
|
168
|
+
if isinstance(tools, list):
|
169
|
+
return ", ".join(tools) if tools else "Standard Tools"
|
170
|
+
if isinstance(tools, str):
|
171
|
+
return tools
|
172
|
+
return "Standard Tools"
|
173
|
+
|
174
|
+
def extract_metadata(self, template_data: Dict[str, Any]) -> Dict[str, Any]:
|
175
|
+
"""Extract agent metadata from template.
|
176
|
+
|
177
|
+
Args:
|
178
|
+
template_data: Template data
|
179
|
+
|
180
|
+
Returns:
|
181
|
+
Dictionary with extracted metadata
|
182
|
+
"""
|
183
|
+
metadata = template_data.get("metadata", {})
|
184
|
+
agent_id = template_data.get("agent_id", "unknown")
|
185
|
+
|
186
|
+
return {
|
187
|
+
"id": agent_id,
|
188
|
+
"display_name": metadata.get("name", agent_id.replace("_", " ").title()),
|
189
|
+
"description": metadata.get("description", f"Agent {agent_id}"),
|
190
|
+
"authority": metadata.get("authority"),
|
191
|
+
"primary_function": metadata.get("primary_function"),
|
192
|
+
"handoff_to": metadata.get("handoff_to"),
|
193
|
+
"model": template_data.get("model", {}).get("model"),
|
194
|
+
"tools": self.extract_tools(template_data),
|
195
|
+
"routing": self.extract_routing(template_data),
|
196
|
+
"memory_routing": self.extract_memory_routing(template_data),
|
197
|
+
"author": template_data.get("author", "unknown"),
|
198
|
+
"version": template_data.get("agent_version", "1.0.0"),
|
199
|
+
}
|
200
|
+
|
201
|
+
def process_local_templates(self) -> Dict[str, Dict[str, Any]]:
|
202
|
+
"""Process all local JSON templates.
|
203
|
+
|
204
|
+
Returns:
|
205
|
+
Dictionary mapping agent IDs to processed metadata
|
206
|
+
"""
|
207
|
+
local_agents = {}
|
208
|
+
|
209
|
+
# Check for local JSON templates in priority order
|
210
|
+
template_dirs = [
|
211
|
+
Path.cwd() / ".claude-mpm" / "agents", # Project local agents
|
212
|
+
Path.home() / ".claude-mpm" / "agents", # User local agents
|
213
|
+
]
|
214
|
+
|
215
|
+
for priority, template_dir in enumerate(template_dirs):
|
216
|
+
if not template_dir.exists():
|
217
|
+
continue
|
218
|
+
|
219
|
+
for json_file in template_dir.glob("*.json"):
|
220
|
+
try:
|
221
|
+
with open(json_file) as f:
|
222
|
+
template_data = json.load(f)
|
223
|
+
|
224
|
+
agent_metadata = self.extract_metadata(template_data)
|
225
|
+
agent_id = agent_metadata["id"]
|
226
|
+
|
227
|
+
# Skip if already found at higher priority
|
228
|
+
if agent_id in local_agents:
|
229
|
+
continue
|
230
|
+
|
231
|
+
# Add local-specific fields
|
232
|
+
agent_metadata["is_local"] = True
|
233
|
+
agent_metadata["tier"] = "project" if priority == 0 else "user"
|
234
|
+
agent_metadata["source_file"] = str(json_file)
|
235
|
+
|
236
|
+
local_agents[agent_id] = agent_metadata
|
237
|
+
self.logger.debug(
|
238
|
+
f"Processed local template: {agent_id} from {template_dir}"
|
239
|
+
)
|
240
|
+
|
241
|
+
except Exception as e:
|
242
|
+
self.logger.warning(f"Failed to process template {json_file}: {e}")
|
243
|
+
|
244
|
+
return local_agents
|