claude-mpm 2.1.1__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. claude_mpm/_version.py +2 -2
  2. claude_mpm/agents/agent_loader.py +682 -102
  3. claude_mpm/agents/base_agent_loader.py +23 -8
  4. claude_mpm/agents/schema/agent_schema.json +237 -83
  5. claude_mpm/agents/templates/data_engineer.json +6 -3
  6. claude_mpm/agents/templates/documentation.json +6 -3
  7. claude_mpm/agents/templates/engineer.json +7 -4
  8. claude_mpm/agents/templates/ops.json +6 -3
  9. claude_mpm/agents/templates/qa.json +10 -5
  10. claude_mpm/agents/templates/research.json +31 -42
  11. claude_mpm/agents/templates/security.json +14 -6
  12. claude_mpm/agents/templates/version_control.json +9 -5
  13. claude_mpm/core/base_service.py +61 -1
  14. claude_mpm/hooks/claude_hooks/hook_handler.py +224 -20
  15. claude_mpm/schemas/README_SECURITY.md +92 -0
  16. claude_mpm/schemas/agent_schema.json +130 -51
  17. claude_mpm/schemas/agent_schema_security_notes.md +165 -0
  18. claude_mpm/services/agent_capabilities_generator.py +0 -1
  19. claude_mpm/services/agent_deployment.py +479 -91
  20. claude_mpm/services/agent_lifecycle_manager.py +62 -4
  21. claude_mpm/services/deployed_agent_discovery.py +0 -1
  22. claude_mpm/services/version_control/semantic_versioning.py +165 -16
  23. claude_mpm/validation/agent_validator.py +147 -13
  24. {claude_mpm-2.1.1.dist-info → claude_mpm-3.0.0.dist-info}/METADATA +2 -2
  25. {claude_mpm-2.1.1.dist-info → claude_mpm-3.0.0.dist-info}/RECORD +29 -29
  26. claude_mpm/cli_old/__init__.py +0 -1
  27. claude_mpm/cli_old/ticket_cli.py +0 -102
  28. {claude_mpm-2.1.1.dist-info → claude_mpm-3.0.0.dist-info}/WHEEL +0 -0
  29. {claude_mpm-2.1.1.dist-info → claude_mpm-3.0.0.dist-info}/entry_points.txt +0 -0
  30. {claude_mpm-2.1.1.dist-info → claude_mpm-3.0.0.dist-info}/licenses/LICENSE +0 -0
  31. {claude_mpm-2.1.1.dist-info → claude_mpm-3.0.0.dist-info}/top_level.txt +0 -0
claude_mpm/services/agent_lifecycle_manager.py
@@ -758,7 +758,36 @@ class AgentLifecycleManager(BaseService):
         return False
 
     async def _update_performance_metrics(self, result: LifecycleOperationResult) -> None:
-        """Update performance metrics with operation result."""
+        """Update performance metrics with operation result.
+
+        METRICS COLLECTION:
+        This method demonstrates a simple ETL pipeline for operational metrics:
+
+        1. EXTRACT: Pull raw data from operation results
+           - Success/failure status
+           - Operation duration
+           - Cache invalidation events
+           - Operation type and agent tier
+
+        2. TRANSFORM: Calculate derived metrics
+           - Success rates and failure percentages
+           - Rolling averages for performance
+           - Operation distribution by type
+           - Performance by agent tier
+
+        3. LOAD: Store in metrics structure
+           - In-memory storage for real-time access
+           - Could be extended to push to:
+             * Time-series databases (Prometheus, InfluxDB)
+             * AI observability platforms (Datadog, New Relic)
+             * Custom analytics pipelines
+
+        OPTIMIZATION OPPORTUNITIES:
+        - Add percentile calculations (p50, p95, p99)
+        - Track operation queuing times
+        - Monitor resource usage per operation
+        - Implement sliding window metrics
+        """
         self.performance_metrics['total_operations'] += 1
 
         if result.success:
@@ -766,16 +795,45 @@ class AgentLifecycleManager(BaseService):
         else:
             self.performance_metrics['failed_operations'] += 1
 
-        # Update average duration
+        # Update average duration using incremental calculation
+        # This avoids storing all durations in memory
         total_ops = self.performance_metrics['total_operations']
         current_avg = self.performance_metrics['average_duration_ms']
         new_avg = ((current_avg * (total_ops - 1)) + result.duration_ms) / total_ops
         self.performance_metrics['average_duration_ms'] = new_avg
 
+        # METRICS: Track operation type distribution
+        # This helps identify which operations are most common
+        op_type = result.operation.value
+        if 'operation_distribution' not in self.performance_metrics:
+            self.performance_metrics['operation_distribution'] = {}
+        self.performance_metrics['operation_distribution'][op_type] = \
+            self.performance_metrics['operation_distribution'].get(op_type, 0) + 1
+
+        # METRICS: Track performance by agent tier
+        # Useful for identifying tier-specific performance issues
+        if hasattr(result, 'tier') and result.tier:
+            if 'tier_performance' not in self.performance_metrics:
+                self.performance_metrics['tier_performance'] = {}
+            tier_name = result.tier.value if hasattr(result.tier, 'value') else str(result.tier)
+            if tier_name not in self.performance_metrics['tier_performance']:
+                self.performance_metrics['tier_performance'][tier_name] = {
+                    'count': 0,
+                    'total_duration_ms': 0,
+                    'average_duration_ms': 0
+                }
+            tier_metrics = self.performance_metrics['tier_performance'][tier_name]
+            tier_metrics['count'] += 1
+            tier_metrics['total_duration_ms'] += result.duration_ms
+            tier_metrics['average_duration_ms'] = \
+                tier_metrics['total_duration_ms'] / tier_metrics['count']
+
         # Update cache hit rate if cache was involved
         if result.cache_invalidated:
-            # This would be more sophisticated in practice
-            pass
+            # Track cache invalidation frequency
+            if 'cache_invalidations' not in self.performance_metrics:
+                self.performance_metrics['cache_invalidations'] = 0
+            self.performance_metrics['cache_invalidations'] += 1
 
     async def _handle_modification_event(self, modification: AgentModification) -> None:
         """Handle modification events from tracker."""
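Note on the hunk above: the incremental average avoids keeping every duration in memory by folding each new sample into the running mean. A quick standalone check that the formula matches a naive mean (plain Python; the durations list and variable names are illustrative only):

# Running (incremental) mean as used in _update_performance_metrics,
# checked against a naive mean over hypothetical durations in ms.
durations = [120.0, 95.5, 240.2, 60.1]

running_avg = 0.0
for total_ops, duration_ms in enumerate(durations, start=1):
    running_avg = ((running_avg * (total_ops - 1)) + duration_ms) / total_ops

naive_avg = sum(durations) / len(durations)
assert abs(running_avg - naive_avg) < 1e-9
print(running_avg)  # 128.95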
claude_mpm/services/deployed_agent_discovery.py
@@ -61,7 +61,6 @@ class DeployedAgentDiscovery:
                 logger.error(f"Failed to extract info from agent {agent}: {e}")
                 continue
 
-            logger.info(f"Discovered {len(deployed_agents)} deployable agents from registry ({filtered_count} templates/base agents filtered out)")
             return deployed_agents
 
         except Exception as e:
claude_mpm/services/version_control/semantic_versioning.py
@@ -7,6 +7,34 @@ This module provides comprehensive semantic versioning management including:
 3. Changelog generation and management
 4. Tag creation and management
 5. Version metadata handling
+
+Semantic Versioning Strategy:
+- Follows semver.org specification 2.0.0
+- Format: MAJOR.MINOR.PATCH[-PRERELEASE][+BUILD]
+- MAJOR: Incompatible API changes
+- MINOR: Backwards-compatible functionality additions
+- PATCH: Backwards-compatible bug fixes
+- PRERELEASE: Optional pre-release identifiers (alpha, beta, rc)
+- BUILD: Optional build metadata
+
+Agent Version Management:
+- Agents use semantic versioning for consistency
+- Version stored in agent template JSON files
+- Automatic migration from old formats (serial, integer)
+- Version comparison for deployment decisions
+- Base and agent version tracking
+
+Version Detection:
+- Multiple file format support (package.json, pyproject.toml, etc.)
+- Git tag integration for version history
+- Changelog parsing for version tracking
+- Fallback mechanisms for missing version info
+
+Change Analysis:
+- Conventional commit pattern matching
+- Breaking change detection
+- Feature and bug fix classification
+- Confidence scoring for version bump suggestions
 """
 
 import re
@@ -32,7 +60,23 @@ class VersionBumpType(Enum):
 
 @dataclass
 class SemanticVersion:
-    """Represents a semantic version."""
+    """Represents a semantic version following semver.org specification.
+
+    This class encapsulates a semantic version with support for:
+    - Major, minor, and patch version numbers
+    - Pre-release identifiers (alpha, beta, rc, etc.)
+    - Build metadata
+    - Version comparison and sorting
+    - Version bumping operations
+
+    The comparison logic follows semver precedence rules:
+    1. Compare major, minor, patch numerically
+    2. Pre-release versions have lower precedence than normal versions
+    3. Pre-release identifiers are compared alphanumerically
+    4. Build metadata is ignored in comparisons
+
+    This is used for both project versioning and agent version management.
+    """
 
     major: int
     minor: int
@@ -41,7 +85,14 @@ class SemanticVersion:
     build: Optional[str] = None
 
     def __str__(self) -> str:
-        """String representation of version."""
+        """String representation of version in semver format.
+
+        Examples:
+        - 1.2.3
+        - 1.2.3-alpha.1
+        - 1.2.3-beta.2+build.123
+        - 1.2.3+20230615
+        """
         version = f"{self.major}.{self.minor}.{self.patch}"
         if self.prerelease:
             version += f"-{self.prerelease}"
@@ -50,7 +101,20 @@ class SemanticVersion:
         return version
 
     def __lt__(self, other: "SemanticVersion") -> bool:
-        """Compare versions for sorting."""
+        """Compare versions for sorting according to semver precedence.
+
+        Comparison Rules:
+        1. Version core (major.minor.patch) compared numerically
+        2. Version with pre-release < same version without pre-release
+        3. Pre-release versions compared alphanumerically
+        4. Build metadata ignored (1.0.0+build1 == 1.0.0+build2)
+
+        This enables proper version sorting for:
+        - Determining latest version
+        - Agent deployment decisions
+        - Version history display
+        """
+        # Compare version core components
         if self.major != other.major:
             return self.major < other.major
         if self.minor != other.minor:
@@ -58,34 +122,68 @@ class SemanticVersion:
         if self.patch != other.patch:
             return self.patch < other.patch
 
-        # Handle prerelease comparison
+        # Handle prerelease comparison per semver spec
+        # No prerelease > with prerelease (1.0.0 > 1.0.0-alpha)
         if self.prerelease is None and other.prerelease is not None:
             return False
         if self.prerelease is not None and other.prerelease is None:
             return True
+        # Both have prerelease - compare alphanumerically
         if self.prerelease is not None and other.prerelease is not None:
             return self.prerelease < other.prerelease
 
         return False
 
     def bump(self, bump_type: VersionBumpType) -> "SemanticVersion":
-        """Create a new version with the specified bump applied."""
+        """Create a new version with the specified bump applied.
+
+        Version Bump Rules:
+        - MAJOR: Increment major, reset minor and patch to 0
+        - MINOR: Increment minor, reset patch to 0
+        - PATCH: Increment patch only
+        - PRERELEASE: Handle pre-release progression
+
+        Pre-release Progression:
+        - No prerelease -> alpha.1
+        - alpha.1 -> alpha.2
+        - beta.1 -> beta.2
+        - rc.1 -> rc.2
+        - custom -> custom.1
+
+        Examples:
+        - 1.2.3 + MAJOR -> 2.0.0
+        - 1.2.3 + MINOR -> 1.3.0
+        - 1.2.3 + PATCH -> 1.2.4
+        - 1.2.3 + PRERELEASE -> 1.2.3-alpha.1
+        - 1.2.3-alpha.1 + PRERELEASE -> 1.2.3-alpha.2
+
+        Args:
+            bump_type: Type of version bump to apply
+
+        Returns:
+            New SemanticVersion instance with bump applied
+        """
         if bump_type == VersionBumpType.MAJOR:
+            # Breaking changes - reset minor and patch
            return SemanticVersion(self.major + 1, 0, 0)
         elif bump_type == VersionBumpType.MINOR:
+            # New features - reset patch only
            return SemanticVersion(self.major, self.minor + 1, 0)
         elif bump_type == VersionBumpType.PATCH:
+            # Bug fixes - increment patch only
            return SemanticVersion(self.major, self.minor, self.patch + 1)
         elif bump_type == VersionBumpType.PRERELEASE:
            if self.prerelease:
-                # Increment prerelease number
+                # Increment existing prerelease number
                match = re.match(r"(.+?)(\d+)$", self.prerelease)
                if match:
                    prefix, num = match.groups()
                    new_prerelease = f"{prefix}{int(num) + 1}"
                else:
+                    # Add .1 if no number present
                    new_prerelease = f"{self.prerelease}.1"
            else:
+                # Start new prerelease series
                new_prerelease = "alpha.1"
 
        return SemanticVersion(self.major, self.minor, self.patch, prerelease=new_prerelease)
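Note on the hunks above: the documented comparison and bump rules can be exercised directly. A sketch assuming the import path from the file list and the public names shown in these hunks; treat it as illustrative rather than a tested snippet:

# Illustrative usage of the documented precedence and bump rules.
from claude_mpm.services.version_control.semantic_versioning import (
    SemanticVersion,
    VersionBumpType,
)

stable = SemanticVersion(1, 2, 3)
alpha = SemanticVersion(1, 2, 3, prerelease="alpha.1")

# A pre-release sorts below the same version without one.
assert alpha < stable
assert sorted([stable, alpha])[0] is alpha

# Bumps follow the docstring: MAJOR resets minor/patch, PRERELEASE increments the series.
assert str(stable.bump(VersionBumpType.MAJOR)) == "2.0.0"
assert str(stable.bump(VersionBumpType.MINOR)) == "1.3.0"
assert str(stable.bump(VersionBumpType.PATCH)) == "1.2.4"
assert str(stable.bump(VersionBumpType.PRERELEASE)) == "1.2.3-alpha.1"
assert str(alpha.bump(VersionBumpType.PRERELEASE)) == "1.2.3-alpha.2"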
@@ -182,6 +280,26 @@ class SemanticVersionManager:
         """
         Parse a version string into a SemanticVersion object.
 
+        Version String Formats Supported:
+        - 1.2.3 (basic semantic version)
+        - v1.2.3 (with 'v' prefix - common in git tags)
+        - 1.2.3-alpha (with prerelease)
+        - 1.2.3-alpha.1 (with prerelease and number)
+        - 1.2.3-beta.2+build.123 (full format)
+        - 1.2.3+20230615 (with build metadata only)
+
+        The parser is flexible and handles:
+        - Optional 'v' prefix (stripped automatically)
+        - Whitespace trimming
+        - Full semver specification compliance
+        - Graceful failure for invalid formats
+
+        This is used for:
+        - Parsing versions from files (package.json, etc.)
+        - Converting git tags to versions
+        - Agent version parsing and migration
+        - User input validation
+
         Args:
             version_string: Version string to parse
 
@@ -189,10 +307,11 @@ class SemanticVersionManager:
             SemanticVersion object or None if parsing fails
         """
         try:
-            # Clean up version string
+            # Clean up version string - handle common variations
             version_string = version_string.strip().lstrip("v")
 
-            # Regex pattern for semantic version
+            # Regex pattern for semantic version per semver.org spec
+            # Captures: major.minor.patch[-prerelease][+build]
             pattern = r"^(\d+)\.(\d+)\.(\d+)(?:-([a-zA-Z0-9\-\.]+))?(?:\+([a-zA-Z0-9\-\.]+))?$"
             match = re.match(pattern, version_string)
 
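Note on the hunk above: the same pattern can be driven standalone with only the re module; parse_semver below is a hypothetical helper that mirrors the cleanup and match steps shown in the diff:

import re

# Same pattern as in parse_version: major.minor.patch[-prerelease][+build]
SEMVER_PATTERN = r"^(\d+)\.(\d+)\.(\d+)(?:-([a-zA-Z0-9\-\.]+))?(?:\+([a-zA-Z0-9\-\.]+))?$"

def parse_semver(version_string):
    """Return (major, minor, patch, prerelease, build) or None (hypothetical helper)."""
    cleaned = version_string.strip().lstrip("v")  # tolerate 'v1.2.3' git-tag style
    match = re.match(SEMVER_PATTERN, cleaned)
    if not match:
        return None
    major, minor, patch, prerelease, build = match.groups()
    return int(major), int(minor), int(patch), prerelease, build

print(parse_semver("v1.2.3"))                  # (1, 2, 3, None, None)
print(parse_semver("1.2.3-beta.2+build.123"))  # (1, 2, 3, 'beta.2', 'build.123')
print(parse_semver("not-a-version"))           # None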
@@ -327,20 +446,50 @@ class SemanticVersionManager:
         """
         Analyze changes to suggest version bump type.
 
+        Change Analysis Process:
+        1. Scan each change description for patterns
+        2. Categorize changes (breaking, feature, fix)
+        3. Determine highest priority change type
+        4. Suggest appropriate version bump
+        5. Calculate confidence score
+
+        Pattern Matching:
+        - Breaking: "breaking", "breaking change", "remove api", etc.
+        - Features: "add", "new feature", "implement", "enhance"
+        - Fixes: "fix", "bug fix", "resolve", "correct"
+
+        Version Bump Priority:
+        1. Breaking changes -> MAJOR (highest priority)
+        2. New features -> MINOR
+        3. Bug fixes -> PATCH
+        4. Other changes -> PATCH (default)
+
+        Confidence Scoring:
+        - 0.9: Clear breaking changes detected
+        - 0.8: Clear new features detected
+        - 0.7: Clear bug fixes detected
+        - 0.5: No clear patterns (default to patch)
+
+        This analysis is used for:
+        - Conventional commit integration
+        - Automated version bumping
+        - Release note generation
+        - Agent version updates
+
         Args:
             changes: List of change descriptions (e.g., commit messages)
 
         Returns:
-            ChangeAnalysis with suggested version bump
+            ChangeAnalysis with suggested version bump and confidence
         """
         analysis = ChangeAnalysis()
         analysis.change_descriptions = changes
 
-        # Analyze each change
+        # Analyze each change against defined patterns
         for change in changes:
             change_lower = change.lower()
 
-            # Check for breaking changes
+            # Check for breaking changes (highest priority)
             if any(re.search(pattern, change_lower) for pattern in self.breaking_change_patterns):
                 analysis.has_breaking_changes = True
 
@@ -352,19 +501,19 @@ class SemanticVersionManager:
             elif any(re.search(pattern, change_lower) for pattern in self.bug_fix_patterns):
                 analysis.has_bug_fixes = True
 
-        # Determine suggested bump
+        # Determine suggested bump based on priority
         if analysis.has_breaking_changes:
             analysis.suggested_bump = VersionBumpType.MAJOR
-            analysis.confidence = 0.9
+            analysis.confidence = 0.9  # High confidence for breaking changes
         elif analysis.has_new_features:
             analysis.suggested_bump = VersionBumpType.MINOR
-            analysis.confidence = 0.8
+            analysis.confidence = 0.8  # Good confidence for features
         elif analysis.has_bug_fixes:
             analysis.suggested_bump = VersionBumpType.PATCH
-            analysis.confidence = 0.7
+            analysis.confidence = 0.7  # Moderate confidence for fixes
         else:
             analysis.suggested_bump = VersionBumpType.PATCH
-            analysis.confidence = 0.5
+            analysis.confidence = 0.5  # Low confidence, default to safe patch
 
         return analysis
 
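Note on the hunks above: a compact standalone sketch of the priority-and-confidence logic; the regex lists here are hypothetical stand-ins for the manager's own breaking/feature/fix pattern attributes:

import re

BREAKING = [r"\bbreaking\b", r"\bremove api\b"]   # hypothetical stand-ins
FEATURES = [r"\badd\b", r"\bnew feature\b", r"\bimplement\b"]
FIXES = [r"\bfix\b", r"\bresolve\b"]

def suggest_bump(changes):
    """Return (bump, confidence) following the documented priority order."""
    lowered = [c.lower() for c in changes]
    def hits(patterns):
        return any(re.search(p, c) for p in patterns for c in lowered)
    if hits(BREAKING):
        return "major", 0.9
    if hits(FEATURES):
        return "minor", 0.8
    if hits(FIXES):
        return "patch", 0.7
    return "patch", 0.5

print(suggest_bump(["fix: resolve crash on startup"]))   # ('patch', 0.7)
print(suggest_bump(["feat: add metrics", "fix: typo"]))  # ('minor', 0.8)
print(suggest_bump(["breaking: remove api endpoint"]))   # ('major', 0.9)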
claude_mpm/validation/agent_validator.py
@@ -3,6 +3,20 @@ Agent validation framework using JSON Schema validation.
 
 This module provides comprehensive validation for agent configurations
 using the standardized JSON schema with direct validation approach.
+
+Security Features:
+- Input validation using JSON Schema to prevent malformed data
+- Path traversal protection in file operations
+- Resource limit validation to prevent resource exhaustion
+- Strict schema validation with no additional properties allowed
+- Character limit enforcement to prevent memory exhaustion
+- Safe JSON parsing with error handling
+
+Security Considerations:
+- All file paths should be validated and sanitized
+- Agent IDs must follow strict naming conventions
+- Resource limits prevent denial of service attacks
+- Schema validation prevents injection of unexpected fields
 """
 
 import json
@@ -27,7 +41,16 @@ class ValidationResult:
 
 
 class AgentValidator:
-    """Validates agent configurations against JSON schema."""
+    """Validates agent configurations against JSON schema.
+
+    SECURITY CRITICAL: This class is the primary defense against malicious agent
+    configurations. All agent data must pass through this validator before being
+    used by the system. Bypassing this validator could lead to:
+    - Arbitrary code execution (via tool access)
+    - Resource exhaustion (via resource limits)
+    - Data exfiltration (via file/network access)
+    - Privilege escalation (via tool combinations)
+    """
 
     def __init__(self, schema_path: Optional[Path] = None):
         """Initialize the validator with the agent schema."""
@@ -39,8 +62,20 @@ class AgentValidator:
         self.validator = Draft7Validator(self.schema)
 
     def _load_schema(self) -> Dict[str, Any]:
-        """Load the JSON schema from file."""
+        """Load the JSON schema from file.
+
+        Security Considerations:
+        - Schema file path is validated to exist and be a file
+        - JSON parsing errors are caught and logged
+        - Schema tampering would be detected by validation failures
+        """
         try:
+            # SECURITY: Validate schema path exists and is a file
+            if not self.schema_path.exists():
+                raise FileNotFoundError(f"Schema file not found: {self.schema_path}")
+            if not self.schema_path.is_file():
+                raise ValueError(f"Schema path is not a file: {self.schema_path}")
+
             with open(self.schema_path, 'r') as f:
                 return json.load(f)
         except Exception as e:
@@ -51,6 +86,12 @@ class AgentValidator:
         """
         Validate a single agent configuration against the schema.
 
+        Security Features:
+        - Strict JSON Schema validation prevents unexpected fields
+        - Business rule validation adds additional security checks
+        - Input size limits prevent memory exhaustion
+        - Agent ID format validation prevents injection attacks
+
         Args:
             agent_data: Agent configuration dictionary
 
@@ -71,28 +112,38 @@ class AgentValidator:
             path = ".".join(str(p) for p in e.path)
             result.errors.append(f"Error at path: {path}")
 
-        # Additional business rule validations
+        # SECURITY: Additional business rule validations beyond schema
+        # These provide defense-in-depth security checks
         if result.is_valid:
             self._validate_business_rules(agent_data, result)
 
         # Add metadata
         result.metadata = {
             "validated_at": datetime.utcnow().isoformat(),
-            "schema_version": self.schema.get("version", "1.0.0"),
+            "schema_version": self.schema.get("version", "1.1.0"),
             "agent_id": agent_data.get("id", "unknown")
         }
 
         return result
 
     def _validate_business_rules(self, agent_data: Dict[str, Any], result: ValidationResult) -> None:
-        """Apply additional business rule validations beyond schema."""
+        """Apply additional business rule validations beyond schema.
+
+        Security Validations:
+        - Resource limits to prevent DoS attacks
+        - Instruction length limits to prevent memory exhaustion
+        - Agent ID format to prevent injection attacks
+        - Tool compatibility to prevent privilege escalation
+        - Self-reference prevention in handoff agents
+        """
 
         # Validate resource tier consistency
         resource_tier = agent_data.get("capabilities", {}).get("resource_tier")
         if resource_tier:
             self._validate_resource_tier_limits(agent_data, resource_tier, result)
 
-        # Validate instruction length (double-check)
+        # SECURITY: Validate instruction length to prevent memory exhaustion
+        # Double-check even though schema enforces this - defense in depth
         instructions = agent_data.get("instructions", "")
         if len(instructions) > 8000:
             result.errors.append(f"Instructions exceed 8000 character limit: {len(instructions)} characters")
@@ -101,19 +152,36 @@ class AgentValidator:
         # Validate model compatibility with tools
         self._validate_model_tool_compatibility(agent_data, result)
 
-        # Validate agent ID format (clean IDs without _agent suffix)
+        # SECURITY: Validate agent ID format to prevent injection attacks
+        # Pattern enforced: ^[a-z][a-z0-9_]*$ prevents special characters
         agent_id = agent_data.get("id", "")
         if agent_id.endswith("_agent"):
             result.warnings.append(f"Agent ID '{agent_id}' contains deprecated '_agent' suffix")
 
-        # Validate handoff agents exist
+        # SECURITY: Additional ID validation for defense in depth
+        if agent_id and not agent_id.replace('_', '').replace('-', '').isalnum():
+            result.errors.append(f"Agent ID '{agent_id}' contains invalid characters")
+            result.is_valid = False
+
+        # SECURITY: Validate handoff agents to prevent circular references and privilege escalation
         handoff_agents = agent_data.get("interactions", {}).get("handoff_agents", [])
         for handoff_id in handoff_agents:
             if handoff_id == agent_id:
                 result.warnings.append(f"Agent '{agent_id}' references itself in handoff_agents")
+            # SECURITY: Ensure handoff IDs follow same pattern as agent IDs
+            if handoff_id and not handoff_id.replace('_', '').replace('-', '').isalnum():
+                result.errors.append(f"Handoff agent ID '{handoff_id}' contains invalid characters")
+                result.is_valid = False
 
     def _validate_resource_tier_limits(self, agent_data: Dict[str, Any], tier: str, result: ValidationResult) -> None:
-        """Validate resource limits match the tier constraints."""
+        """Validate resource limits match the tier constraints.
+
+        Security Purpose:
+        - Prevents resource exhaustion attacks
+        - Ensures agents can't request excessive resources
+        - Enforces fair resource allocation
+        - Prevents denial of service through resource hogging
+        """
         tier_limits = {
             "intensive": {
                 "memory_limit": (4096, 8192),
@@ -182,7 +250,8 @@ class AgentValidator:
                 f"Haiku model '{model}' using resource-intensive tools: {used_intensive}"
             )
 
-        # Network access requirement
+        # SECURITY: Network access requirement validation
+        # Ensures agents can't use network tools without explicit permission
         network_tools = {"WebSearch", "WebFetch", "aws", "gcloud", "azure"}
         needs_network = bool(set(tools) & network_tools)
         has_network = agent_data.get("capabilities", {}).get("network_access", False)
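Note on the agent ID validation added above: the character check reduces to a one-line predicate. A hypothetical standalone version, with examples of IDs it accepts and rejects:

def has_valid_id_characters(agent_id):
    """Hypothetical standalone version of the check above:
    letters, digits, underscores, and hyphens only (empty IDs rejected here)."""
    return bool(agent_id) and agent_id.replace('_', '').replace('-', '').isalnum()

print(has_valid_id_characters("research_agent"))  # True
print(has_valid_id_characters("qa-v2"))           # True
print(has_valid_id_characters("qa;rm -rf /"))     # False (rejected)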
@@ -191,10 +260,38 @@ class AgentValidator:
             result.warnings.append(
                 f"Agent uses network tools {set(tools) & network_tools} but network_access is False"
             )
+
+        # SECURITY: Check for potentially dangerous tool combinations
+        dangerous_combos = [
+            ({"Bash", "Write"}, "Can execute arbitrary code by writing and running scripts"),
+            ({"docker", "kubectl"}, "Container escape potential with both tools"),
+            ({"aws", "gcloud", "azure"}, "Multiple cloud access increases attack surface")
+        ]
+
+        for combo, risk in dangerous_combos:
+            if combo.issubset(set(tools)):
+                result.warnings.append(f"Potentially dangerous tool combination: {combo} - {risk}")
 
     def validate_file(self, file_path: Path) -> ValidationResult:
-        """Validate an agent configuration file."""
+        """Validate an agent configuration file.
+
+        Security Measures:
+        - Path traversal protection through Path object
+        - Safe JSON parsing with error handling
+        - File size limits should be enforced by caller
+        """
         try:
+            # SECURITY: Validate file path
+            if not file_path.exists():
+                raise FileNotFoundError(f"File not found: {file_path}")
+            if not file_path.is_file():
+                raise ValueError(f"Path is not a file: {file_path}")
+
+            # SECURITY: Check file size to prevent memory exhaustion
+            file_size = file_path.stat().st_size
+            max_size = 1024 * 1024  # 1MB limit for agent configs
+            if file_size > max_size:
+                raise ValueError(f"File too large: {file_size} bytes (max {max_size} bytes)")
             with open(file_path, 'r') as f:
                 agent_data = json.load(f)
 
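Note on the hunk above: the dangerous-combination warning is a plain set-subset test. A minimal sketch using the combinations from this hunk against a hypothetical tool list:

dangerous_combos = [
    ({"Bash", "Write"}, "Can execute arbitrary code by writing and running scripts"),
    ({"docker", "kubectl"}, "Container escape potential with both tools"),
]

agent_tools = ["Read", "Write", "Bash", "WebSearch"]  # hypothetical agent config

for combo, risk in dangerous_combos:
    if combo.issubset(set(agent_tools)):
        print(f"Potentially dangerous tool combination: {combo} - {risk}")
# -> warns about the Bash/Write combination; the docker/kubectl combo is not flagged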
@@ -212,13 +309,39 @@ class AgentValidator:
         return result
 
     def validate_directory(self, directory: Path) -> Dict[str, ValidationResult]:
-        """Validate all agent files in a directory."""
+        """Validate all agent files in a directory.
+
+        Security Considerations:
+        - Directory traversal prevention through Path.glob
+        - Symlink following should be disabled in production
+        - Large directory DoS prevention through file count limits
+        """
         results = {}
 
+        # SECURITY: Validate directory exists and is accessible
+        if not directory.exists():
+            raise FileNotFoundError(f"Directory not found: {directory}")
+        if not directory.is_dir():
+            raise ValueError(f"Path is not a directory: {directory}")
+
+        # SECURITY: Limit number of files to prevent DoS
+        max_files = 100
+        file_count = 0
+
         for json_file in directory.glob("*.json"):
             if json_file.name == "agent_schema.json":
                 continue
 
+            # SECURITY: Skip symlinks to prevent directory traversal
+            if json_file.is_symlink():
+                logger.warning(f"Skipping symlink: {json_file}")
+                continue
+
+            file_count += 1
+            if file_count > max_files:
+                logger.warning(f"Reached maximum file limit ({max_files}), stopping validation")
+                break
+
             logger.info(f"Validating {json_file}")
             results[json_file.name] = self.validate_file(json_file)
 
@@ -239,6 +362,11 @@ def validate_agent_migration(old_agent: Dict[str, Any], new_agent: Dict[str, Any
     """
     Validate that a migrated agent maintains compatibility.
 
+    Security Importance:
+    - Ensures privilege escalation doesn't occur during migration
+    - Validates that security constraints are preserved
+    - Prevents addition of dangerous tools without review
+
     Args:
         old_agent: Original agent configuration
         new_agent: Migrated agent configuration
@@ -248,7 +376,7 @@ def validate_agent_migration(old_agent: Dict[str, Any], new_agent: Dict[str, Any
     """
     result = ValidationResult(is_valid=True)
 
-    # Check that core functionality is preserved
+    # SECURITY: Check that core functionality is preserved without privilege escalation
     old_tools = set(old_agent.get("configuration_fields", {}).get("tools", []))
     new_tools = set(new_agent.get("capabilities", {}).get("tools", []))
 
@@ -259,6 +387,12 @@ def validate_agent_migration(old_agent: Dict[str, Any], new_agent: Dict[str, Any
         result.warnings.append(f"Tools removed in migration: {missing}")
     if added:
         result.warnings.append(f"Tools added in migration: {added}")
+        # SECURITY: Flag addition of dangerous tools
+        dangerous_tools = {"Bash", "docker", "kubectl", "aws", "gcloud", "azure"}
+        dangerous_added = added & dangerous_tools
+        if dangerous_added:
+            result.errors.append(f"SECURITY: Dangerous tools added in migration: {dangerous_added}")
+            result.is_valid = False
 
     # Check instruction preservation
     old_instructions = old_agent.get("narrative_fields", {}).get("instructions", "")
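Note on the hunk above: the migration guard is likewise a small set calculation. A sketch with two hypothetical agent configurations:

# Hypothetical before/after agent configs; keys follow the migration check above.
old_agent = {"configuration_fields": {"tools": ["Read", "Grep"]}}
new_agent = {"capabilities": {"tools": ["Read", "Grep", "Bash"]}}

old_tools = set(old_agent.get("configuration_fields", {}).get("tools", []))
new_tools = set(new_agent.get("capabilities", {}).get("tools", []))
added = new_tools - old_tools

dangerous_tools = {"Bash", "docker", "kubectl", "aws", "gcloud", "azure"}
dangerous_added = added & dangerous_tools
if dangerous_added:
    print(f"SECURITY: Dangerous tools added in migration: {dangerous_added}")
# -> flags Bash, which the validator records as an error and marks the result invalid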
{claude_mpm-2.1.1.dist-info → claude_mpm-3.0.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: claude-mpm
-Version: 2.1.1
+Version: 3.0.0
 Summary: Claude Multi-agent Project Manager - Clean orchestration with ticket management
 Home-page: https://github.com/bobmatnyc/claude-mpm
 Author: Claude MPM Team
@@ -19,7 +19,7 @@ Classifier: Programming Language :: Python :: 3.12
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: ai-trackdown-pytools>=1.2.0
+Requires-Dist: ai-trackdown-pytools>=1.4.0
 Requires-Dist: pyyaml>=6.0
 Requires-Dist: python-dotenv>=0.19.0
 Requires-Dist: rich>=13.0.0