claude-mpm 1.1.0__py3-none-any.whl → 2.1.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
- claude_mpm/_version.py +4 -33
- claude_mpm/agents/INSTRUCTIONS.md +109 -319
- claude_mpm/agents/agent_loader.py +184 -278
- claude_mpm/agents/base_agent.json +1 -1
- claude_mpm/agents/templates/backup/data_engineer_agent_20250726_234551.json +46 -0
- claude_mpm/agents/templates/{engineer_agent.json → backup/engineer_agent_20250726_234551.json} +1 -1
- claude_mpm/agents/templates/data_engineer.json +107 -0
- claude_mpm/agents/templates/documentation.json +106 -0
- claude_mpm/agents/templates/engineer.json +110 -0
- claude_mpm/agents/templates/ops.json +106 -0
- claude_mpm/agents/templates/qa.json +106 -0
- claude_mpm/agents/templates/research.json +75 -0
- claude_mpm/agents/templates/security.json +105 -0
- claude_mpm/agents/templates/version_control.json +103 -0
- claude_mpm/cli.py +80 -11
- claude_mpm/core/simple_runner.py +45 -5
- claude_mpm/hooks/claude_hooks/hook_handler.py +115 -1
- claude_mpm/schemas/agent_schema.json +328 -0
- claude_mpm/services/agent_capabilities_generator.py +182 -0
- claude_mpm/services/agent_deployment.py +228 -37
- claude_mpm/services/deployed_agent_discovery.py +222 -0
- claude_mpm/services/framework_claude_md_generator/content_assembler.py +29 -0
- claude_mpm/services/framework_claude_md_generator/deployment_manager.py +29 -7
- claude_mpm/utils/framework_detection.py +39 -0
- claude_mpm/validation/agent_validator.py +252 -125
- {claude_mpm-1.1.0.dist-info → claude_mpm-2.1.0.dist-info}/METADATA +108 -26
- {claude_mpm-1.1.0.dist-info → claude_mpm-2.1.0.dist-info}/RECORD +36 -25
- claude_mpm/agents/templates/data_engineer_agent.json +0 -46
- claude_mpm/agents/templates/update-optimized-specialized-agents.json +0 -374
- /claude_mpm/agents/templates/{documentation_agent.json → backup/documentation_agent_20250726_234551.json} +0 -0
- /claude_mpm/agents/templates/{ops_agent.json → backup/ops_agent_20250726_234551.json} +0 -0
- /claude_mpm/agents/templates/{qa_agent.json → backup/qa_agent_20250726_234551.json} +0 -0
- /claude_mpm/agents/templates/{research_agent.json → backup/research_agent_20250726_234551.json} +0 -0
- /claude_mpm/agents/templates/{security_agent.json → backup/security_agent_20250726_234551.json} +0 -0
- /claude_mpm/agents/templates/{version_control_agent.json → backup/version_control_agent_20250726_234551.json} +0 -0
- {claude_mpm-1.1.0.dist-info → claude_mpm-2.1.0.dist-info}/WHEEL +0 -0
- {claude_mpm-1.1.0.dist-info → claude_mpm-2.1.0.dist-info}/entry_points.txt +0 -0
- {claude_mpm-1.1.0.dist-info → claude_mpm-2.1.0.dist-info}/top_level.txt +0 -0
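
A comparison like this can be reproduced locally by downloading both wheels and diffing their extracted trees. A minimal sketch; the wheel filenames follow the standard naming in the title above, and registry availability is assumed:

```python
# Download both wheels (wheels are zip archives), unpack, and compare the trees.
import subprocess
import zipfile
import filecmp

for ver, dest in [("1.1.0", "old"), ("2.1.0", "new")]:
    subprocess.run(
        ["pip", "download", f"claude-mpm=={ver}", "--no-deps", "-d", dest],
        check=True,
    )
    with zipfile.ZipFile(f"{dest}/claude_mpm-{ver}-py3-none-any.whl") as wheel:
        wheel.extractall(f"{dest}/src")

# Print added, removed, and differing files, recursing into subdirectories.
filecmp.dircmp("old/src", "new/src").report_full_closure()
```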
The key hunks follow, grouped by file. Some removed lines are shown truncated (for example a bare `if` or `existing_fw_ver`), as rendered by the diff viewer.

`content_assembler.py` gains dynamic agent-capabilities support:

```diff
--- a/claude_mpm/services/framework_claude_md_generator/content_assembler.py
+++ b/claude_mpm/services/framework_claude_md_generator/content_assembler.py
@@ -8,6 +8,12 @@ import hashlib
 from datetime import datetime
 from typing import Dict, List, Optional, Any
 from collections import OrderedDict
+import logging
+
+from claude_mpm.services.deployed_agent_discovery import DeployedAgentDiscovery
+from claude_mpm.services.agent_capabilities_generator import AgentCapabilitiesGenerator
+
+logger = logging.getLogger(__name__)
 
 
 class ContentAssembler:
@@ -16,6 +22,9 @@ class ContentAssembler:
     def __init__(self):
         """Initialize content assembler."""
         self.template_variables = {}
+        self.agent_discovery = DeployedAgentDiscovery()
+        self.capabilities_generator = AgentCapabilitiesGenerator()
+        logger.debug("Initialized ContentAssembler with dynamic agent capabilities support")
 
     def generate_content_hash(self) -> str:
         """
@@ -63,12 +72,32 @@
         """
         Apply template variable substitution to content.
 
+        WHY: Enhanced to support dynamic agent capabilities generation.
+        - Generates fresh agent capabilities on each call
+        - Provides graceful fallback if generation fails
+        - Ensures INSTRUCTIONS.md always reflects current deployed agents
+
         Args:
             content: Content with template variables
 
         Returns:
             str: Content with variables substituted
         """
+        # Check if we need to generate dynamic capabilities
+        if "{{capabilities-list}}" in content:
+            try:
+                # Discover deployed agents
+                deployed_agents = self.agent_discovery.discover_deployed_agents()
+                # Generate capabilities content
+                capabilities_content = self.capabilities_generator.generate_capabilities_section(deployed_agents)
+                # Add to template variables
+                self.template_variables['capabilities-list'] = capabilities_content
+                logger.info(f"Generated dynamic capabilities for {len(deployed_agents)} agents")
+            except Exception as e:
+                logger.error(f"Failed to generate dynamic capabilities: {e}")
+                # Fallback is handled by the generator's internal fallback mechanism
+
+        # Apply all template variables
         for var_name, var_value in self.template_variables.items():
             placeholder = f"{{{{{var_name}}}}}"
             content = content.replace(placeholder, var_value)
```
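
Taken together, these hunks mean a template only pays the agent-discovery cost when it actually contains the placeholder. A minimal usage sketch, assuming the import path from the file list above; the template text is illustrative:

```python
from claude_mpm.services.framework_claude_md_generator.content_assembler import (
    ContentAssembler,
)

# Illustrative template; only the {{capabilities-list}} placeholder is significant.
template = (
    "# Claude Multi-Agent Project Manager Instructions\n\n"
    "## Agent Capabilities\n"
    "{{capabilities-list}}\n"
)

assembler = ContentAssembler()
rendered = assembler.apply_template_variables(template)
print(rendered)  # placeholder replaced by the generated capabilities section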
`deployment_manager.py` re-processes templated content and sniffs the document format before validating:

```diff
--- a/claude_mpm/services/framework_claude_md_generator/deployment_manager.py
+++ b/claude_mpm/services/framework_claude_md_generator/deployment_manager.py
@@ -35,6 +35,11 @@ class DeploymentManager:
         """
         Deploy generated content to a parent directory.
 
+        WHY: Enhanced to ensure fresh agent capabilities generation on each deployment.
+        - Checks for template variables that need processing
+        - Re-processes content to get current deployed agents
+        - Ensures INSTRUCTIONS.md always reflects latest agent configuration
+
         Args:
             content: Content to deploy
             parent_path: Path to parent directory
@@ -52,16 +57,33 @@
         target_file = parent_path / "INSTRUCTIONS.md"
         # TODO: Make this configurable via parameter
 
+        # Check if content contains template variables that need processing
+        if '{{capabilities-list}}' in content:
+            # Content needs processing - let ContentAssembler handle it
+            from .content_assembler import ContentAssembler
+            assembler = ContentAssembler()
+
+            # Re-process content to get fresh agent data
+            # Pass content as a single section to preserve structure
+            processed_content = assembler.apply_template_variables(content)
+            content = processed_content
+
         # Validate content before deployment
-
-        if
-
+        # Skip validation for INSTRUCTIONS.md format (different from CLAUDE.md)
+        if "<!-- FRAMEWORK_VERSION:" in content and "# Claude Multi-Agent Project Manager Instructions" in content:
+            # This is INSTRUCTIONS.md format, skip CLAUDE.md validation
+            pass
+        else:
+            # This is CLAUDE.md format, validate normally
+            is_valid, issues = self.validator.validate_content(content)
+            if not is_valid:
+                return False, f"Validation failed: {'; '.join(issues)}"
 
         # Check if file exists and compare versions
         if target_file.exists() and not force:
             with open(target_file, 'r') as f:
                 existing_content = f.read()
-                existing_fw_ver
+                existing_fw_ver = self.version_manager.parse_current_version(existing_content)
 
             if existing_fw_ver == self.version_manager.framework_version:
                 return True, f"Version {existing_fw_ver} already deployed"
```
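
The validation branch distinguishes the two document formats purely by content markers. Expressed as a standalone predicate (a hypothetical helper; the shipped code keeps the check inline):

```python
def is_instructions_md(content: str) -> bool:
    """True when content carries both INSTRUCTIONS.md markers used in the hunk above."""
    return (
        "<!-- FRAMEWORK_VERSION:" in content
        and "# Claude Multi-Agent Project Manager Instructions" in content
    )
```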
Two further hunks in the same file route version parsing through explicit `version_manager.parse_current_version` calls:

```diff
@@ -76,8 +98,8 @@
                 f.write(content)
 
             # Get version info for success message
-            fw_ver
-            version_str =
+            fw_ver = self.version_manager.parse_current_version(content)
+            version_str = fw_ver
 
             return True, f"Successfully deployed version {version_str}"
         except Exception as e:
@@ -101,7 +123,7 @@
         try:
             with open(target_file, 'r') as f:
                 existing_content = f.read()
-                existing_fw_ver
+                existing_fw_ver = self.version_manager.parse_current_version(existing_content)
 
             if existing_fw_ver != self.version_manager.framework_version:
                 return True, f"Version mismatch: {existing_fw_ver} vs {self.version_manager.framework_version}"
```
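
The deploy path returns a `(success, message)` tuple, as the return statements above show. A caller sketch; the method name `deploy_to_parent` and the no-argument construction are assumptions, since neither is visible in this diff:

```python
from pathlib import Path

from claude_mpm.services.framework_claude_md_generator.deployment_manager import (
    DeploymentManager,
)

manager = DeploymentManager()  # assumed construction; signature not shown in the diff
ok, message = manager.deploy_to_parent("...", Path.cwd())  # assumed method name
# message is e.g. "Successfully deployed version <fw_ver>",
# "Version <fw_ver> already deployed", or "Validation failed: ..."
print("deployed" if ok else "skipped/failed", "-", message)
```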
`claude_mpm/utils/framework_detection.py` is new in 2.1.0:

```diff
--- /dev/null
+++ b/claude_mpm/utils/framework_detection.py
@@ -0,0 +1,39 @@
+"""Framework source directory detection utilities.
+
+WHY: This module provides utilities to detect if we're in the framework source directory
+to prevent accidental overwrites of the template files during deployment.
+"""
+
+from pathlib import Path
+from typing import Tuple, List
+
+
+def is_framework_source_directory(path: Path) -> Tuple[bool, List[str]]:
+    """
+    Check if the given path is the framework source directory.
+
+    WHY: We need to prevent deployment to the framework source directory itself
+    to avoid overwriting template files.
+
+    Args:
+        path: Path to check
+
+    Returns:
+        Tuple of (is_framework_source, list of detected markers)
+    """
+    markers = []
+
+    # Check for framework source markers
+    if (path / "src" / "claude_mpm").exists():
+        markers.append("src/claude_mpm")
+
+    if (path / "pyproject.toml").exists():
+        markers.append("pyproject.toml")
+
+    if (path / "src" / "claude_mpm" / "agents" / "INSTRUCTIONS.md").exists():
+        markers.append("framework INSTRUCTIONS.md template")
+
+    # If we have multiple markers, it's likely the framework source
+    is_framework = len(markers) >= 2
+
+    return is_framework, markers
```
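
A usage sketch for the new guard; the module path and signature come straight from the hunk above:

```python
from pathlib import Path

from claude_mpm.utils.framework_detection import is_framework_source_directory

is_source, markers = is_framework_source_directory(Path.cwd())
if is_source:
    # e.g. markers == ["src/claude_mpm", "pyproject.toml"]
    print(f"Refusing to deploy into framework source (markers: {', '.join(markers)})")
```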
Finally, `agent_validator.py` is rebuilt around `jsonschema`: the YAML override machinery is removed and agents are validated directly against `claude_mpm/schemas/agent_schema.json`:

```diff
--- a/claude_mpm/validation/agent_validator.py
+++ b/claude_mpm/validation/agent_validator.py
@@ -1,15 +1,18 @@
 """
-Agent validation framework
+Agent validation framework using JSON Schema validation.
 
-This module provides comprehensive validation for agent configurations
-
+This module provides comprehensive validation for agent configurations
+using the standardized JSON schema with direct validation approach.
 """
 
+import json
 import logging
 from pathlib import Path
-from typing import Dict, List,
-import yaml
+from typing import Dict, List, Optional, Any, Tuple
 from dataclasses import dataclass, field
+from datetime import datetime
+import jsonschema
+from jsonschema import validate, ValidationError, Draft7Validator
 
 logger = logging.getLogger(__name__)
 
@@ -20,156 +23,280 @@ class ValidationResult:
     is_valid: bool
     errors: List[str] = field(default_factory=list)
     warnings: List[str] = field(default_factory=list)
-
-    applied_overrides: Dict[str, Any] = field(default_factory=dict)
+    metadata: Dict[str, Any] = field(default_factory=dict)
 
 
 class AgentValidator:
-    """Validates agent configurations
+    """Validates agent configurations against JSON schema."""
 
-
-
-
-
-
-        self.
-
-
+    def __init__(self, schema_path: Optional[Path] = None):
+        """Initialize the validator with the agent schema."""
+        if schema_path is None:
+            schema_path = Path(__file__).parent.parent / "schemas" / "agent_schema.json"
+
+        self.schema_path = schema_path
+        self.schema = self._load_schema()
+        self.validator = Draft7Validator(self.schema)
 
-    def
-        """Load
+    def _load_schema(self) -> Dict[str, Any]:
+        """Load the JSON schema from file."""
         try:
-            with open(
-
-            return data.get('overrides', {})
+            with open(self.schema_path, 'r') as f:
+                return json.load(f)
         except Exception as e:
-            logger.
-
+            logger.error(f"Failed to load schema from {self.schema_path}: {e}")
+            raise
 
-    def
-        """
+    def validate_agent(self, agent_data: Dict[str, Any]) -> ValidationResult:
+        """
+        Validate a single agent configuration against the schema.
+
+        Args:
+            agent_data: Agent configuration dictionary
+
+        Returns:
+            ValidationResult with validation status and any errors/warnings
+        """
         result = ValidationResult(is_valid=True)
 
-        #
-
-
+        # Perform JSON schema validation
+        try:
+            validate(instance=agent_data, schema=self.schema)
+        except ValidationError as e:
+            result.is_valid = False
+            result.errors.append(f"Schema validation error: {e.message}")
+
+            # Add path information if available
+            if e.path:
+                path = ".".join(str(p) for p in e.path)
+                result.errors.append(f"Error at path: {path}")
 
-
-
-
+        # Additional business rule validations
+        if result.is_valid:
+            self._validate_business_rules(agent_data, result)
 
-        #
-
-
-
-
-
-        # Validate prompt template
-        if 'prompt_template' in config and 'prompt_template' not in locked_fields:
-            template_result = self._validate_prompt_template(config['prompt_template'])
-            if not template_result[0]:
-                result.errors.extend(template_result[1])
-                result.is_valid = False
-
-        # Validate tools if present
-        if 'tools' in config:
-            tools_result = self._validate_tools(config['tools'])
-            if not tools_result[0]:
-                result.errors.extend(tools_result[1])
-                result.is_valid = False
+        # Add metadata
+        result.metadata = {
+            "validated_at": datetime.utcnow().isoformat(),
+            "schema_version": self.schema.get("version", "1.0.0"),
+            "agent_id": agent_data.get("id", "unknown")
+        }
 
         return result
 
-    def
-        """Apply
-
-
+    def _validate_business_rules(self, agent_data: Dict[str, Any], result: ValidationResult) -> None:
+        """Apply additional business rule validations beyond schema."""
+
+        # Validate resource tier consistency
+        resource_tier = agent_data.get("capabilities", {}).get("resource_tier")
+        if resource_tier:
+            self._validate_resource_tier_limits(agent_data, resource_tier, result)
+
+        # Validate instruction length (double-check)
+        instructions = agent_data.get("instructions", "")
+        if len(instructions) > 8000:
+            result.errors.append(f"Instructions exceed 8000 character limit: {len(instructions)} characters")
+            result.is_valid = False
 
-
-
-        skip_validation = override_config.get('skip_validation', False)
+        # Validate model compatibility with tools
+        self._validate_model_tool_compatibility(agent_data, result)
 
-        #
-
-
-
-            if override_config.get(field, False):
-                locked_fields.add(base_field)
-            elif field not in ['notes', 'skip_validation']:
-                config[field] = value
+        # Validate agent ID format (clean IDs without _agent suffix)
+        agent_id = agent_data.get("id", "")
+        if agent_id.endswith("_agent"):
+            result.warnings.append(f"Agent ID '{agent_id}' contains deprecated '_agent' suffix")
 
-
+        # Validate handoff agents exist
+        handoff_agents = agent_data.get("interactions", {}).get("handoff_agents", [])
+        for handoff_id in handoff_agents:
+            if handoff_id == agent_id:
+                result.warnings.append(f"Agent '{agent_id}' references itself in handoff_agents")
 
-    def
-        """Validate
-
+    def _validate_resource_tier_limits(self, agent_data: Dict[str, Any], tier: str, result: ValidationResult) -> None:
+        """Validate resource limits match the tier constraints."""
+        tier_limits = {
+            "intensive": {
+                "memory_limit": (4096, 8192),
+                "cpu_limit": (60, 100),
+                "timeout": (600, 3600)
+            },
+            "standard": {
+                "memory_limit": (2048, 4096),
+                "cpu_limit": (30, 60),
+                "timeout": (300, 1200)
+            },
+            "lightweight": {
+                "memory_limit": (512, 2048),
+                "cpu_limit": (10, 30),
+                "timeout": (30, 600)
+            }
+        }
 
-        if
-
-            return False, errors
+        if tier not in tier_limits:
+            return
 
-
-
-        missing_placeholders = []
+        limits = tier_limits[tier]
+        capabilities = agent_data.get("capabilities", {})
 
-
-
-
+        # Check memory limit
+        memory = capabilities.get("memory_limit")
+        if memory is not None:
+            min_mem, max_mem = limits["memory_limit"]
+            if not (min_mem <= memory <= max_mem):
+                result.warnings.append(
+                    f"Memory limit {memory}MB outside recommended range "
+                    f"{min_mem}-{max_mem}MB for tier '{tier}'"
+                )
 
-
-
+        # Check CPU limit
+        cpu = capabilities.get("cpu_limit")
+        if cpu is not None:
+            min_cpu, max_cpu = limits["cpu_limit"]
+            if not (min_cpu <= cpu <= max_cpu):
+                result.warnings.append(
+                    f"CPU limit {cpu}% outside recommended range "
+                    f"{min_cpu}-{max_cpu}% for tier '{tier}'"
+                )
 
-
+        # Check timeout
+        timeout = capabilities.get("timeout")
+        if timeout is not None:
+            min_timeout, max_timeout = limits["timeout"]
+            if not (min_timeout <= timeout <= max_timeout):
+                result.warnings.append(
+                    f"Timeout {timeout}s outside recommended range "
+                    f"{min_timeout}-{max_timeout}s for tier '{tier}'"
+                )
 
-    def
-        """Validate
-
-
-        if not isinstance(tools, list):
-            errors.append("Tools must be a list")
-            return False, errors
+    def _validate_model_tool_compatibility(self, agent_data: Dict[str, Any], result: ValidationResult) -> None:
+        """Validate that model and tools are compatible."""
+        model = agent_data.get("capabilities", {}).get("model", "")
+        tools = agent_data.get("capabilities", {}).get("tools", [])
 
-        #
-
-
-
-
+        # Haiku models shouldn't use resource-intensive tools
+        if "haiku" in model.lower():
+            intensive_tools = {"docker", "kubectl", "terraform", "aws", "gcloud", "azure"}
+            used_intensive = set(tools) & intensive_tools
+            if used_intensive:
+                result.warnings.append(
+                    f"Haiku model '{model}' using resource-intensive tools: {used_intensive}"
+                )
 
-
-
-
+        # Network access requirement
+        network_tools = {"WebSearch", "WebFetch", "aws", "gcloud", "azure"}
+        needs_network = bool(set(tools) & network_tools)
+        has_network = agent_data.get("capabilities", {}).get("network_access", False)
 
-
+        if needs_network and not has_network:
+            result.warnings.append(
+                f"Agent uses network tools {set(tools) & network_tools} but network_access is False"
+            )
 
-    def
-        """Validate an
-        result = ValidationResult(is_valid=True)
-
+    def validate_file(self, file_path: Path) -> ValidationResult:
+        """Validate an agent configuration file."""
         try:
-            with open(
-
+            with open(file_path, 'r') as f:
+                agent_data = json.load(f)
+
+            result = self.validate_agent(agent_data)
+            result.metadata["file_path"] = str(file_path)
+            return result
+
+        except json.JSONDecodeError as e:
+            result = ValidationResult(is_valid=False)
+            result.errors.append(f"Invalid JSON in {file_path}: {e}")
+            return result
         except Exception as e:
-            result
-            result.
+            result = ValidationResult(is_valid=False)
+            result.errors.append(f"Error reading {file_path}: {e}")
         return result
+
+    def validate_directory(self, directory: Path) -> Dict[str, ValidationResult]:
+        """Validate all agent files in a directory."""
+        results = {}
+
+        for json_file in directory.glob("*.json"):
+            if json_file.name == "agent_schema.json":
+                continue
+
+            logger.info(f"Validating {json_file}")
+            results[json_file.name] = self.validate_file(json_file)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        return results
+
+    def get_schema_info(self) -> Dict[str, Any]:
+        """Get information about the loaded schema."""
+        return {
+            "schema_path": str(self.schema_path),
+            "schema_title": self.schema.get("title", "Unknown"),
+            "schema_description": self.schema.get("description", ""),
+            "required_fields": self.schema.get("required", []),
+            "properties": list(self.schema.get("properties", {}).keys())
+        }
+
+
+def validate_agent_migration(old_agent: Dict[str, Any], new_agent: Dict[str, Any]) -> ValidationResult:
+    """
+    Validate that a migrated agent maintains compatibility.
+
+    Args:
+        old_agent: Original agent configuration
+        new_agent: Migrated agent configuration
+
+    Returns:
+        ValidationResult with migration validation results
+    """
+    result = ValidationResult(is_valid=True)
+
+    # Check that core functionality is preserved
+    old_tools = set(old_agent.get("configuration_fields", {}).get("tools", []))
+    new_tools = set(new_agent.get("capabilities", {}).get("tools", []))
+
+    if old_tools != new_tools:
+        missing = old_tools - new_tools
+        added = new_tools - old_tools
+        if missing:
+            result.warnings.append(f"Tools removed in migration: {missing}")
+        if added:
+            result.warnings.append(f"Tools added in migration: {added}")
+
+    # Check instruction preservation
+    old_instructions = old_agent.get("narrative_fields", {}).get("instructions", "")
+    new_instructions = new_agent.get("instructions", "")
+
+    if old_instructions and not new_instructions:
+        result.errors.append("Instructions lost in migration")
+        result.is_valid = False
+    elif len(old_instructions) > len(new_instructions) * 1.1:  # Allow 10% reduction
+        result.warnings.append("Significant instruction content reduction in migration")
+
+    return result
+
+
+# Convenience functions
+def validate_agent_file(file_path: Path) -> ValidationResult:
+    """Validate a single agent file."""
+    validator = AgentValidator()
+    return validator.validate_file(file_path)
+
+
+def validate_all_agents(directory: Path) -> Tuple[int, int, List[str]]:
+    """
+    Validate all agents in a directory and return summary.
+
+    Returns:
+        Tuple of (valid_count, invalid_count, error_messages)
+    """
+    validator = AgentValidator()
+    results = validator.validate_directory(directory)
+
+    valid_count = sum(1 for r in results.values() if r.is_valid)
+    invalid_count = len(results) - valid_count
+
+    error_messages = []
+    for filename, result in results.items():
+        if not result.is_valid:
+            for error in result.errors:
+                error_messages.append(f"{filename}: {error}")
+
+    return valid_count, invalid_count, error_messages
```
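
A usage sketch for the rewritten validator via its convenience helpers; the template path is illustrative but matches a file added in this release:

```python
from pathlib import Path

from claude_mpm.validation.agent_validator import (
    validate_agent_file,
    validate_all_agents,
)

result = validate_agent_file(Path("claude_mpm/agents/templates/engineer.json"))
print(result.is_valid, result.errors, result.warnings, result.metadata)

valid, invalid, errors = validate_all_agents(Path("claude_mpm/agents/templates"))
print(f"{valid} valid, {invalid} invalid")
for line in errors:
    print(line)
```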