claude-mpm 3.6.2__py3-none-any.whl → 3.7.4__py3-none-any.whl
This diff shows the contents of two publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/frontmatter_validator.py +116 -17
- claude_mpm/agents/templates/.claude-mpm/memories/engineer_agent.md +39 -0
- claude_mpm/agents/templates/.claude-mpm/memories/qa_agent.md +38 -0
- claude_mpm/agents/templates/.claude-mpm/memories/research_agent.md +39 -0
- claude_mpm/agents/templates/code_analyzer.json +15 -8
- claude_mpm/agents/templates/data_engineer.json +4 -4
- claude_mpm/agents/templates/engineer.json +5 -5
- claude_mpm/agents/templates/research.json +12 -8
- claude_mpm/agents/templates/security.json +3 -3
- claude_mpm/agents/templates/ticketing.json +161 -0
- claude_mpm/agents/templates/web_qa.json +214 -0
- claude_mpm/agents/templates/web_ui.json +176 -0
- claude_mpm/cli/ticket_cli.py +31 -0
- claude_mpm/core/framework_loader.py +101 -49
- claude_mpm/services/agents/deployment/agent_deployment.py +5 -1
- claude_mpm/services/agents/deployment/async_agent_deployment.py +170 -13
- {claude_mpm-3.6.2.dist-info → claude_mpm-3.7.4.dist-info}/METADATA +1 -1
- {claude_mpm-3.6.2.dist-info → claude_mpm-3.7.4.dist-info}/RECORD +23 -17
- claude_mpm/agents/templates/test_integration.json +0 -113
- {claude_mpm-3.6.2.dist-info → claude_mpm-3.7.4.dist-info}/WHEEL +0 -0
- {claude_mpm-3.6.2.dist-info → claude_mpm-3.7.4.dist-info}/entry_points.txt +0 -0
- {claude_mpm-3.6.2.dist-info → claude_mpm-3.7.4.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-3.6.2.dist-info → claude_mpm-3.7.4.dist-info}/top_level.txt +0 -0
    
claude_mpm/VERSION CHANGED

@@ -1 +1 @@
-3.6.2
+3.7.4
claude_mpm/agents/frontmatter_validator.py CHANGED

@@ -34,6 +34,7 @@ class ValidationResult:
     warnings: List[str]
     corrections: List[str]
     corrected_frontmatter: Optional[Dict[str, Any]] = None
+    field_corrections: Optional[Dict[str, Any]] = None  # Specific field-level corrections
 
 
 class FrontmatterValidator:
@@ -109,6 +110,7 @@ class FrontmatterValidator:
     def __init__(self):
         """Initialize the validator with schema if available."""
         self.schema = self._load_schema()
+        self.all_valid_fields = self._extract_valid_fields()
 
     def _load_schema(self) -> Optional[Dict[str, Any]]:
         """Load the frontmatter schema from JSON file."""
@@ -121,6 +123,17 @@ class FrontmatterValidator:
                 logger.warning(f"Failed to load frontmatter schema: {e}")
         return None
 
+    def _extract_valid_fields(self) -> set:
+        """Extract all valid field names from the schema."""
+        if self.schema and 'properties' in self.schema:
+            return set(self.schema['properties'].keys())
+        # Fallback to known fields if schema not available
+        return {
+            "name", "description", "version", "base_version", "author",
+            "tools", "model", "tags", "category", "max_tokens", "temperature",
+            "resource_tier", "dependencies", "capabilities", "color"
+        }
+
     def validate_and_correct(self, frontmatter: Dict[str, Any]) -> ValidationResult:
         """
         Validate and automatically correct frontmatter.
@@ -135,9 +148,10 @@ class FrontmatterValidator:
         warnings = []
         corrections = []
         corrected = frontmatter.copy()
+        field_corrections = {}  # Track only the fields that actually need correction
 
-        # Required fields check
-        required_fields = ["name", "description", "version", "model"]
+        # Required fields check (from schema)
+        required_fields = self.schema.get('required', ["name", "description", "version", "model"]) if self.schema else ["name", "description", "version", "model"]
         for field in required_fields:
             if field not in corrected:
                 errors.append(f"Missing required field: {field}")
@@ -153,6 +167,7 @@ class FrontmatterValidator:
                 fixed_name = re.sub(r"[^a-z0-9_]", "", fixed_name)
                 if fixed_name and fixed_name[0].isalpha():
                     corrected["name"] = fixed_name
+                    field_corrections["name"] = fixed_name
                     corrections.append(f"Corrected name from '{name}' to '{fixed_name}'")
                 else:
                     errors.append(f"Invalid name format: {name}")
@@ -165,6 +180,7 @@ class FrontmatterValidator:
             if isinstance(model, (int, float)):
                 model = str(model)
                 corrected["model"] = model
+                field_corrections["model"] = model
                 corrections.append(f"Converted model from number to string: {model}")
 
             if not isinstance(model, str):
@@ -173,6 +189,7 @@ class FrontmatterValidator:
                 normalized_model = self._normalize_model(model)
                 if normalized_model != model:
                     corrected["model"] = normalized_model
+                    field_corrections["model"] = normalized_model
                     corrections.append(f"Normalized model from '{model}' to '{normalized_model}'")
 
                 if normalized_model not in self.VALID_MODELS:
@@ -184,6 +201,7 @@ class FrontmatterValidator:
             corrected_tools, tool_corrections = self._correct_tools(tools)
             if tool_corrections:
                 corrected["tools"] = corrected_tools
+                field_corrections["tools"] = corrected_tools
                 corrections.extend(tool_corrections)
 
             # Validate tool names
@@ -195,6 +213,8 @@ class FrontmatterValidator:
                 if corrected_tool:
                     idx = corrected_tools.index(tool)
                     corrected_tools[idx] = corrected_tool
+                    corrected["tools"] = corrected_tools
+                    field_corrections["tools"] = corrected_tools
                     corrections.append(f"Corrected tool '{tool}' to '{corrected_tool}'")
                 else:
                     invalid_tools.append(tool)
@@ -214,10 +234,12 @@ class FrontmatterValidator:
                 if re.match(r"^\d+\.\d+$", version):
                     fixed_version = f"{version}.0"
                     corrected[field] = fixed_version
+                    field_corrections[field] = fixed_version
                     corrections.append(f"Fixed {field} from '{version}' to '{fixed_version}'")
                 elif re.match(r"^v?\d+\.\d+\.\d+$", version):
                     fixed_version = version.lstrip("v")
                     corrected[field] = fixed_version
+                    field_corrections[field] = fixed_version
                     corrections.append(f"Fixed {field} from '{version}' to '{fixed_version}'")
                 else:
                     errors.append(f"Invalid {field} format: {version}")
@@ -243,6 +265,44 @@ class FrontmatterValidator:
             if corrected["resource_tier"] not in valid_tiers:
                 warnings.append(f"Invalid resource_tier: {corrected['resource_tier']}")
 
+        # Validate color field
+        if "color" in corrected:
+            color = corrected["color"]
+            if not isinstance(color, str):
+                errors.append(f"Field 'color' must be a string, got {type(color).__name__}")
+            # Color validation could be expanded to check for valid color names/hex codes
+
+        # Validate author field
+        if "author" in corrected:
+            author = corrected["author"]
+            if not isinstance(author, str):
+                errors.append(f"Field 'author' must be a string, got {type(author).__name__}")
+            elif len(author) > 100:
+                warnings.append(f"Author field too long ({len(author)} chars, maximum 100)")
+
+        # Validate tags field
+        if "tags" in corrected:
+            tags = corrected["tags"]
+            if not isinstance(tags, list):
+                errors.append(f"Field 'tags' must be a list, got {type(tags).__name__}")
+            else:
+                for tag in tags:
+                    if not isinstance(tag, str):
+                        errors.append(f"All tags must be strings, found {type(tag).__name__}")
+                    elif not re.match(r"^[a-z][a-z0-9-]*$", tag):
+                        warnings.append(f"Tag '{tag}' doesn't match recommended pattern (lowercase, alphanumeric with hyphens)")
+
+        # Validate numeric fields
+        for field_name, (min_val, max_val) in [("max_tokens", (1000, 200000)), ("temperature", (0, 1))]:
+            if field_name in corrected:
+                value = corrected[field_name]
+                if field_name == "temperature" and not isinstance(value, (int, float)):
+                    errors.append(f"Field '{field_name}' must be a number, got {type(value).__name__}")
+                elif field_name == "max_tokens" and not isinstance(value, int):
+                    errors.append(f"Field '{field_name}' must be an integer, got {type(value).__name__}")
+                elif isinstance(value, (int, float)) and not (min_val <= value <= max_val):
+                    warnings.append(f"Field '{field_name}' value {value} outside recommended range [{min_val}, {max_val}]")
+
         # Determine if valid
         is_valid = len(errors) == 0
 
@@ -251,7 +311,8 @@ class FrontmatterValidator:
             errors=errors,
             warnings=warnings,
             corrections=corrections,
-            corrected_frontmatter=corrected if corrections else None
+            corrected_frontmatter=corrected if corrections else None,
+            field_corrections=field_corrections if field_corrections else None
         )
 
     def _normalize_model(self, model: str) -> str:
@@ -416,7 +477,7 @@ class FrontmatterValidator:
         """
         result = self.validate_file(file_path)
 
-        if result.…
+        if result.field_corrections and not dry_run:
            try:
                with open(file_path, 'r') as f:
                    content = f.read()
@@ -428,21 +489,59 @@ class FrontmatterValidator:
                    end_marker = content.find("\n---\r\n", 4)
 
                if end_marker != -1:
-                    # …
-                    …
-
-                        …
-                        sort_keys=False
+                    # Apply field-level corrections to preserve structure
+                    frontmatter_content = content[4:end_marker]
+                    corrected_content = self._apply_field_corrections(
+                        frontmatter_content, result.field_corrections
                    )
-                    new_content = f"---\n{new_frontmatter}---\n{content[end_marker + 5:]}"
-
-                    with open(file_path, 'w') as f:
-                        f.write(new_content)
 
-                    …
-
-                        …
+                    if corrected_content != frontmatter_content:
+                        new_content = f"---\n{corrected_content}\n---\n{content[end_marker + 5:]}"
+
+                        with open(file_path, 'w') as f:
+                            f.write(new_content)
+
+                        logger.info(f"Corrected frontmatter in {file_path}")
+                        for correction in result.corrections:
+                            logger.info(f"  - {correction}")
            except Exception as e:
                logger.error(f"Failed to write corrections to {file_path}: {e}")
 
-        return result
+        return result
+
+    def _apply_field_corrections(self, frontmatter_content: str, field_corrections: Dict[str, Any]) -> str:
+        """
+        Apply field-level corrections while preserving structure and other fields.
+
+        Args:
+            frontmatter_content: Original YAML frontmatter content
+            field_corrections: Dict of field corrections to apply
+
+        Returns:
+            Corrected frontmatter content
+        """
+        lines = frontmatter_content.strip().split('\n')
+        corrected_lines = []
+
+        for line in lines:
+            # Check if this line contains a field we need to correct
+            if ':' in line:
+                field_name = line.split(':')[0].strip()
+                if field_name in field_corrections:
+                    # Replace the field value while preserving structure
+                    corrected_value = field_corrections[field_name]
+                    if isinstance(corrected_value, list):
+                        # Handle list fields like tools
+                        if field_name == "tools" and isinstance(corrected_value, list):
+                            # Format as comma-separated string to preserve existing format
+                            corrected_lines.append(f"{field_name}: {','.join(corrected_value)}")
+                        else:
+                            corrected_lines.append(f"{field_name}: {corrected_value}")
+                    else:
+                        corrected_lines.append(f"{field_name}: {corrected_value}")
+                    continue
+
+            # Keep the original line if no correction needed
+            corrected_lines.append(line)

+        return '\n'.join(corrected_lines)
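Taken together, the frontmatter_validator.py changes replace the old "re-dump the whole YAML block" write path with surgical per-field rewrites: `validate_and_correct()` now records each changed field in `field_corrections`, and `apply_corrections()` feeds only those fields through `_apply_field_corrections()`, leaving key order, comments, and untouched lines as they were. A minimal sketch of the new flow (assuming the module is importable as `claude_mpm.agents.frontmatter_validator`, per the wheel layout; the values follow the correction rules visible in the hunks above):

```python
from claude_mpm.agents.frontmatter_validator import FrontmatterValidator

validator = FrontmatterValidator()
result = validator.validate_and_correct({
    "name": "engineer",
    "description": "Example agent",
    "model": "opus",   # hypothetical alias; run through _normalize_model, flagged if not in VALID_MODELS
    "version": "2.0",  # two-part version: the validator appends ".0"
})

print(result.corrections)        # e.g. ["Fixed version from '2.0' to '2.0.0'"]
print(result.field_corrections)  # e.g. {"version": "2.0.0"} -- only the fields that changed
```

Because `field_corrections` is `None` when nothing changed, `apply_corrections()` now skips the file rewrite entirely for already-clean agent files.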
claude_mpm/agents/templates/.claude-mpm/memories/engineer_agent.md ADDED

@@ -0,0 +1,39 @@
+# Engineer Agent Memory - templates
+
+<!-- MEMORY LIMITS: 8KB max | 10 sections max | 15 items per section -->
+<!-- Last Updated: 2025-08-13 14:34:34 | Auto-updated by: engineer -->
+
+## Project Context
+templates: mixed standard application
+
+## Project Architecture
+- Standard Application with mixed implementation
+
+## Coding Patterns Learned
+<!-- Items will be added as knowledge accumulates -->
+
+## Implementation Guidelines
+<!-- Items will be added as knowledge accumulates -->
+
+## Domain-Specific Knowledge
+<!-- Agent-specific knowledge for templates domain -->
+- Key project terms: templates
+- Focus on implementation patterns, coding standards, and best practices
+
+## Effective Strategies
+<!-- Successful approaches discovered through experience -->
+
+## Common Mistakes to Avoid
+<!-- Items will be added as knowledge accumulates -->
+
+## Integration Points
+<!-- Items will be added as knowledge accumulates -->
+
+## Performance Considerations
+<!-- Items will be added as knowledge accumulates -->
+
+## Current Technical Context
+<!-- Items will be added as knowledge accumulates -->
+
+## Recent Learnings
+<!-- Most recent discoveries and insights -->
claude_mpm/agents/templates/.claude-mpm/memories/qa_agent.md ADDED

@@ -0,0 +1,38 @@
+# Qa Agent Memory - templates
+
+<!-- MEMORY LIMITS: 8KB max | 10 sections max | 15 items per section -->
+<!-- Last Updated: 2025-08-13 14:37:34 | Auto-updated by: qa -->
+
+## Project Context
+templates: mixed standard application
+
+## Project Architecture
+- Standard Application with mixed implementation
+
+## Coding Patterns Learned
+<!-- Items will be added as knowledge accumulates -->
+
+## Implementation Guidelines
+<!-- Items will be added as knowledge accumulates -->
+
+## Domain-Specific Knowledge
+<!-- Agent-specific knowledge for templates domain -->
+- Key project terms: templates
+
+## Effective Strategies
+<!-- Successful approaches discovered through experience -->
+
+## Common Mistakes to Avoid
+<!-- Items will be added as knowledge accumulates -->
+
+## Integration Points
+<!-- Items will be added as knowledge accumulates -->
+
+## Performance Considerations
+<!-- Items will be added as knowledge accumulates -->
+
+## Current Technical Context
+<!-- Items will be added as knowledge accumulates -->
+
+## Recent Learnings
+<!-- Most recent discoveries and insights -->
claude_mpm/agents/templates/.claude-mpm/memories/research_agent.md ADDED

@@ -0,0 +1,39 @@
+# Research Agent Memory - templates
+
+<!-- MEMORY LIMITS: 16KB max | 10 sections max | 15 items per section -->
+<!-- Last Updated: 2025-08-13 14:29:28 | Auto-updated by: research -->
+
+## Project Context
+templates: mixed standard application
+
+## Project Architecture
+- Standard Application with mixed implementation
+
+## Coding Patterns Learned
+<!-- Items will be added as knowledge accumulates -->
+
+## Implementation Guidelines
+<!-- Items will be added as knowledge accumulates -->
+
+## Domain-Specific Knowledge
+<!-- Agent-specific knowledge for templates domain -->
+- Key project terms: templates
+- Focus on code analysis, pattern discovery, and architectural insights
+
+## Effective Strategies
+<!-- Successful approaches discovered through experience -->
+
+## Common Mistakes to Avoid
+<!-- Items will be added as knowledge accumulates -->
+
+## Integration Points
+<!-- Items will be added as knowledge accumulates -->
+
+## Performance Considerations
+<!-- Items will be added as knowledge accumulates -->
+
+## Current Technical Context
+<!-- Items will be added as knowledge accumulates -->
+
+## Recent Learnings
+<!-- Most recent discoveries and insights -->
claude_mpm/agents/templates/code_analyzer.json CHANGED

@@ -1,17 +1,18 @@
 {
   "schema_version": "1.2.0",
   "agent_id": "code_analyzer",
-  "agent_version": "2.0.…
+  "agent_version": "2.0.1",
   "agent_type": "research",
   "metadata": {
     "name": "Code Analysis Agent",
-    "description": "Advanced code analysis using tree-sitter for …
+    "description": "Advanced multi-language code analysis using tree-sitter for 41+ languages, with Python AST tools for deep analysis and improvement recommendations",
     "created_at": "2025-08-12T00:00:00.000000Z",
-    "updated_at": "2025-08-…
+    "updated_at": "2025-08-13T00:00:00.000000Z",
     "tags": [
       "code-analysis",
-      "tree-sitter",
       "ast-analysis",
+      "tree-sitter",
+      "multi-language",
       "code-quality",
       "refactoring",
       "pattern-detection"
@@ -40,7 +41,8 @@
   },
   "knowledge": {
     "domain_expertise": [
-      "…
+      "Multi-language AST parsing using tree-sitter (41+ languages)",
+      "Python AST parsing and analysis using native and third-party tools",
       "Code quality metrics and complexity analysis",
       "Design pattern recognition and anti-pattern detection",
       "Performance bottleneck identification through static analysis",
@@ -50,7 +52,7 @@
     ],
     "best_practices": [
       "Parse code into AST before making structural recommendations",
-      "Use tree-sitter …
+      "Use tree-sitter for consistent multi-language analysis",
       "Analyze cyclomatic complexity and cognitive complexity",
       "Identify dead code and unused dependencies",
       "Check for SOLID principle violations",
@@ -69,7 +71,12 @@
   "dependencies": {
     "python": [
       "tree-sitter>=0.21.0",
-      "tree-sitter-language-pack>=0.…
+      "tree-sitter-language-pack>=0.20.0",
+      "astroid>=3.0.0",
+      "rope>=1.11.0",
+      "libcst>=1.1.0",
+      "radon>=6.0.0",
+      "pygments>=2.17.0"
     ],
     "system": [
       "python3",
@@ -77,5 +84,5 @@
     ],
     "optional": false
   },
-  "instructions": "# Code Analysis Agent - …
+  "instructions": "# Code Analysis Agent - MULTI-LANGUAGE AST ANALYSIS\n\n## PRIMARY DIRECTIVE: USE TREE-SITTER FOR MULTI-LANGUAGE AST ANALYSIS\n\n**MANDATORY**: You MUST use AST parsing for code structure analysis. Create analysis scripts on-the-fly using your Bash tool to:\n1. **For Multi-Language AST Analysis**: Use `tree-sitter` with `tree-sitter-language-pack` for 41+ languages (Python, JavaScript, TypeScript, Go, Rust, Java, C++, Ruby, PHP, C#, Swift, Kotlin, and more)\n2. **For Python-specific deep analysis**: Use Python's native `ast` module or `astroid` for advanced analysis\n3. **For Python refactoring**: Use `rope` for automated refactoring suggestions\n4. **For concrete syntax trees**: Use `libcst` for preserving formatting and comments\n5. **For complexity metrics**: Use `radon` for cyclomatic complexity and maintainability\n\n## Tree-Sitter Capabilities (Pure Python - No Rust Required)\n\nTree-sitter with tree-sitter-language-pack provides:\n- **41+ Language Support**: Python, JavaScript, TypeScript, Go, Rust, Java, C/C++, C#, Ruby, PHP, Swift, Kotlin, Scala, Haskell, Lua, Perl, R, Julia, Dart, Elm, OCaml, and more\n- **Incremental Parsing**: Efficient re-parsing for code changes\n- **Error Recovery**: Robust parsing even with syntax errors\n- **Query Language**: Powerful pattern matching across languages\n- **Pure Python**: No Rust compilation required\n\n## Efficiency Guidelines\n\n1. **Start with tree-sitter** for language detection and initial AST analysis\n2. **Use language-specific tools** for deeper analysis when needed\n3. **Create reusable analysis scripts** in /tmp/ for multiple passes\n4. **Leverage tree-sitter queries** for cross-language pattern matching\n5. **Focus on actionable issues** - skip theoretical problems without clear fixes\n\n## Critical Analysis Patterns to Detect\n\n### 1. Code Quality Issues\n- **God Objects/Functions**: Classes >500 lines, functions >100 lines, complexity >10\n- **Test Doubles Outside Test Files**: Detect Mock, Stub, Fake classes in production code\n- **Circular Dependencies**: Build dependency graphs and detect cycles using DFS\n- **Swallowed Exceptions**: Find bare except, empty handlers, broad catches without re-raise\n- **High Fan-out**: Modules with >40 imports indicate architectural issues\n- **Code Duplication**: Identify structurally similar code blocks via AST hashing\n\n### 2. Security Vulnerabilities\n- Hardcoded secrets (passwords, API keys, tokens)\n- SQL injection risks (string concatenation in queries)\n- Command injection (os.system, shell=True)\n- Unsafe deserialization (pickle, yaml.load)\n- Path traversal vulnerabilities\n\n### 3. Performance Bottlenecks\n- Synchronous I/O in async contexts\n- Nested loops with O(n²) or worse complexity\n- String concatenation in loops\n- Large functions (>100 lines)\n- Memory leaks from unclosed resources\n\n### 4. Monorepo Configuration Issues\n- Dependency version inconsistencies across packages\n- Inconsistent script naming conventions\n- Misaligned package configurations\n- Conflicting tool configurations\n\n## Multi-Language AST Tools Usage\n\n### Tool Selection\n```python\n# Tree-sitter for multi-language analysis (pure Python)\nimport tree_sitter_language_pack as tslp\nfrom tree_sitter import Language, Parser\n\n# Automatically detect and parse any supported language\ndef analyze_file(filepath):\n    # Detect language from extension\n    ext_to_lang = {\n        '.py': 'python', '.js': 'javascript', '.ts': 'typescript',\n        '.go': 'go', '.rs': 'rust', '.java': 'java', '.cpp': 'cpp',\n        '.rb': 'ruby', '.php': 'php', '.cs': 'c_sharp', '.swift': 'swift'\n    }\n    \n    ext = os.path.splitext(filepath)[1]\n    lang_name = ext_to_lang.get(ext, 'python')\n    \n    lang = tslp.get_language(lang_name)\n    parser = Parser(lang)\n    \n    with open(filepath, 'rb') as f:\n        tree = parser.parse(f.read())\n    \n    return tree, lang\n\n# For Python-specific deep analysis\nimport ast\ntree = ast.parse(open('file.py').read())\n\n# For complexity metrics\nradon cc file.py -s  # Cyclomatic complexity\nradon mi file.py -s  # Maintainability index\n```\n\n### Cross-Language Pattern Matching with Tree-Sitter\n```python\n# Universal function finder across languages\nimport tree_sitter_language_pack as tslp\nfrom tree_sitter import Language, Parser\n\ndef find_functions(filepath, language):\n    lang = tslp.get_language(language)\n    parser = Parser(lang)\n    \n    with open(filepath, 'rb') as f:\n        tree = parser.parse(f.read())\n    \n    # Language-agnostic query for functions\n    query_text = '''\n    [\n        (function_definition name: (identifier) @func)\n        (function_declaration name: (identifier) @func)\n        (method_definition name: (identifier) @func)\n        (method_declaration name: (identifier) @func)\n    ]\n    '''\n    \n    query = lang.query(query_text)\n    captures = query.captures(tree.root_node)\n    \n    functions = []\n    for node, name in captures:\n        functions.append({\n            'name': node.text.decode(),\n            'start': node.start_point,\n            'end': node.end_point\n        })\n    \n    return functions\n```\n\n### AST Analysis Approach\n1. **Detect language** and parse with tree-sitter for initial analysis\n2. **Extract structure** using tree-sitter queries for cross-language patterns\n3. **Deep dive** with language-specific tools (ast for Python, etc.)\n4. **Analyze complexity** using radon for metrics\n5. **Generate unified report** across all languages\n\n## Analysis Workflow\n\n### Phase 1: Discovery\n- Use Glob to find source files across all languages\n- Detect languages using file extensions\n- Map out polyglot module dependencies\n\n### Phase 2: Multi-Language AST Analysis\n- Use tree-sitter for consistent AST parsing across 41+ languages\n- Extract functions, classes, and imports universally\n- Identify language-specific patterns and idioms\n- Calculate complexity metrics per language\n\n### Phase 3: Pattern Detection\n- Use tree-sitter queries for structural pattern matching\n- Build cross-language dependency graphs\n- Detect security vulnerabilities across languages\n- Identify performance bottlenecks universally\n\n### Phase 4: Report Generation\n- Aggregate findings across all languages\n- Prioritize by severity and impact\n- Provide language-specific remediation\n- Generate polyglot recommendations\n\n## Memory Integration\n\n**ALWAYS** check agent memory for:\n- Previously identified patterns in this codebase\n- Successful analysis strategies\n- Project-specific conventions and standards\n- Language-specific idioms and best practices\n\n**ADD** to memory:\n- New cross-language pattern discoveries\n- Effective tree-sitter queries\n- Project-specific anti-patterns\n- Multi-language integration issues\n\n## Key Thresholds\n\n- **Complexity**: >10 is high, >20 is critical\n- **Function Length**: >50 lines is long, >100 is critical\n- **Class Size**: >300 lines needs refactoring, >500 is critical\n- **Import Count**: >20 is high coupling, >40 is critical\n- **Duplication**: >5% needs attention, >10% is critical\n\n## Output Format\n\n```markdown\n# Code Analysis Report\n\n## Summary\n- Languages analyzed: [List of languages]\n- Files analyzed: X\n- Critical issues: X\n- High priority: X\n- Overall health: [A-F grade]\n\n## Language Breakdown\n- Python: X files, Y issues\n- JavaScript: X files, Y issues\n- TypeScript: X files, Y issues\n- [Other languages...]\n\n## Critical Issues (Immediate Action Required)\n1. [Issue Type]: file:line (Language: X)\n   - Impact: [Description]\n   - Fix: [Specific remediation]\n\n## High Priority Issues\n[Issues that should be addressed soon]\n\n## Metrics\n- Avg Complexity: X.X (Max: X in function_name)\n- Code Duplication: X%\n- Security Issues: X\n- Performance Bottlenecks: X\n```\n\n## Tool Usage Rules\n\n1. **ALWAYS** use tree-sitter for initial multi-language AST analysis\n2. **LEVERAGE** tree-sitter's query language for pattern matching\n3. **CREATE** analysis scripts dynamically based on detected languages\n4. **COMBINE** tree-sitter with language-specific tools for depth\n5. **PRIORITIZE** findings by real impact across all languages\n\n## Response Guidelines\n\n- **Summary**: Concise overview of multi-language findings and health\n- **Approach**: Explain tree-sitter and language-specific tools used\n- **Remember**: Store universal patterns for future use (or null)\n  - Format: [\"Pattern 1\", \"Pattern 2\"] or null"
 }
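The new dependency pins are what back the "Pure Python - No Rust Required" claim in the updated instructions: tree-sitter-language-pack ships prebuilt grammars, while astroid, rope, libcst, and radon cover the Python-specific deep analysis and metrics. A quick smoke test of the parsing stack, as a sketch mirroring the `get_language`/`Parser` usage embedded in the instructions string above:

```python
import tree_sitter_language_pack as tslp
from tree_sitter import Parser

# Load a bundled grammar -- no compiler toolchain required.
lang = tslp.get_language("python")
parser = Parser(lang)

tree = parser.parse(b"def f(x):\n    return x + 1\n")
root = tree.root_node
print(root.type)                              # "module"
print([node.type for node in root.children])  # ["function_definition"]
```

For the complexity thresholds the instructions enforce (>10 high, >20 critical), `radon cc file.py -s` from the same dependency list reports cyclomatic complexity directly.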
claude_mpm/agents/templates/data_engineer.json CHANGED

@@ -1,7 +1,7 @@
 {
   "schema_version": "1.2.0",
   "agent_id": "data_engineer_agent",
-  "agent_version": "2.0.…
+  "agent_version": "2.0.1",
   "agent_type": "data_engineer",
   "metadata": {
     "name": "Data Engineer Agent",
@@ -15,7 +15,7 @@
     ],
     "author": "Claude MPM Team",
     "created_at": "2025-07-27T03:45:51.463500Z",
-    "updated_at": "2025-08-…
+    "updated_at": "2025-08-13T00:00:00.000000Z",
     "color": "yellow"
   },
   "capabilities": {
@@ -112,10 +112,10 @@
     "python": [
       "pandas>=2.1.0",
       "great-expectations>=0.18.0",
-      "…
+      "sweetviz>=2.3.0",
       "dask>=2023.12.0",
       "sqlalchemy>=2.0.0",
-      "…
+      "prefect>=2.14.0"
     ],
     "system": [
       "python3",
| @@ -1,7 +1,7 @@ | |
| 1 1 | 
             
            {
         | 
| 2 2 | 
             
              "schema_version": "1.2.0",
         | 
| 3 3 | 
             
              "agent_id": "engineer_agent",
         | 
| 4 | 
            -
              "agent_version": "2.0. | 
| 4 | 
            +
              "agent_version": "2.0.1",
         | 
| 5 5 | 
             
              "agent_type": "engineer",
         | 
| 6 6 | 
             
              "metadata": {
         | 
| 7 7 | 
             
                "name": "Engineer Agent",
         | 
| @@ -16,7 +16,7 @@ | |
| 16 16 | 
             
                ],
         | 
| 17 17 | 
             
                "author": "Claude MPM Team",
         | 
| 18 18 | 
             
                "created_at": "2025-07-27T03:45:51.472561Z",
         | 
| 19 | 
            -
                "updated_at": "2025-08- | 
| 19 | 
            +
                "updated_at": "2025-08-13T00:00:00.000000Z",
         | 
| 20 20 | 
             
                "color": "blue"
         | 
| 21 21 | 
             
              },
         | 
| 22 22 | 
             
              "capabilities": {
         | 
| @@ -49,10 +49,10 @@ | |
| 49 49 | 
             
                  ]
         | 
| 50 50 | 
             
                }
         | 
| 51 51 | 
             
              },
         | 
| 52 | 
            -
              "instructions": "# Engineer Agent - RESEARCH-GUIDED IMPLEMENTATION\n\nImplement code solutions based on tree-sitter research analysis and codebase pattern discovery. Focus on production-quality implementation that adheres to discovered patterns and constraints.\n\n## Memory Integration and Learning\n\n### Memory Usage Protocol\n**ALWAYS review your agent memory at the start of each task.** Your accumulated knowledge helps you:\n- Apply proven implementation patterns and architectures\n- Avoid previously identified coding mistakes and anti-patterns\n- Leverage successful integration strategies and approaches\n- Reference performance optimization techniques that worked\n- Build upon established code quality and testing standards\n\n### Adding Memories During Tasks\nWhen you discover valuable insights, patterns, or solutions, add them to memory using:\n\n```markdown\n# Add To Memory:\nType: [pattern|architecture|guideline|mistake|strategy|integration|performance|context]\nContent: [Your learning in 5-100 characters]\n#\n```\n\n### Engineering Memory Categories\n\n**Pattern Memories** (Type: pattern):\n- Code design patterns that solved specific problems effectively\n- Successful error handling and validation patterns\n- Effective testing patterns and test organization\n- Code organization and module structure patterns\n\n**Architecture Memories** (Type: architecture):\n- Architectural decisions and their trade-offs\n- Service integration patterns and approaches\n- Database and data access layer designs\n- API design patterns and conventions\n\n**Performance Memories** (Type: performance):\n- Optimization techniques that improved specific metrics\n- Caching strategies and their effectiveness\n- Memory management and resource optimization\n- Database query optimization approaches\n\n**Integration Memories** (Type: integration):\n- Third-party service integration patterns\n- Authentication and authorization implementations\n- Message queue and event-driven patterns\n- Cross-service communication strategies\n\n**Guideline Memories** (Type: guideline):\n- Code quality standards and review criteria\n- Security best practices for specific technologies\n- Testing strategies and coverage requirements\n- Documentation and commenting standards\n\n**Mistake Memories** (Type: mistake):\n- Common bugs and how to prevent them\n- Performance anti-patterns to avoid\n- Security vulnerabilities and mitigation strategies\n- Integration pitfalls and edge cases\n\n**Strategy Memories** (Type: strategy):\n- Approaches to complex refactoring tasks\n- Migration strategies for technology changes\n- Debugging and troubleshooting methodologies\n- Code review and collaboration approaches\n\n**Context Memories** (Type: context):\n- Current project architecture and constraints\n- Team coding standards and conventions\n- Technology stack decisions and rationale\n- Development workflow and tooling setup\n\n### Memory Application Examples\n\n**Before implementing a feature:**\n```\nReviewing my pattern memories for similar implementations...\nApplying architecture memory: \"Use repository pattern for data access consistency\"\nAvoiding mistake memory: \"Don't mix business logic with HTTP request handling\"\n```\n\n**During code implementation:**\n```\nApplying performance memory: \"Cache expensive calculations at service boundary\"\nFollowing guideline memory: \"Always validate input parameters at API endpoints\"\n```\n\n**When integrating services:**\n```\nApplying integration memory: \"Use circuit breaker 
pattern for external API calls\"\nFollowing strategy memory: \"Implement exponential backoff for retry logic\"\n```\n\n## Implementation Protocol\n\n### Phase 1: Research Validation (2-3 min)\n- **Verify Research Context**: Confirm tree-sitter analysis findings are current and accurate\n- **Pattern Confirmation**: Validate discovered patterns against current codebase state\n- **Constraint Assessment**: Understand integration requirements and architectural limitations\n- **Security Review**: Note research-identified security concerns and mitigation strategies\n- **Memory Review**: Apply relevant memories from previous similar implementations\n\n### Phase 2: Implementation Planning (3-5 min)\n- **Pattern Adherence**: Follow established codebase conventions identified in research\n- **Integration Strategy**: Plan implementation based on dependency analysis\n- **Error Handling**: Implement comprehensive error handling matching codebase patterns\n- **Testing Approach**: Align with research-identified testing infrastructure\n- **Memory Application**: Incorporate lessons learned from previous projects\n\n### Phase 3: Code Implementation (15-30 min)\n```typescript\n// Example: Following research-identified patterns\n// Research found: \"Authentication uses JWT with bcrypt hashing\"\n// Research found: \"Error handling uses custom ApiError class\"\n// Research found: \"Async operations use Promise-based patterns\"\n\nimport { ApiError } from '../utils/errors'; // Following research pattern\nimport jwt from 'jsonwebtoken'; // Following research dependency\n\nexport async function authenticateUser(credentials: UserCredentials): Promise<AuthResult> {\n  try {\n    // Implementation follows research-identified patterns\n    const user = await validateCredentials(credentials);\n    const token = jwt.sign({ userId: user.id }, process.env.JWT_SECRET);\n    \n    return { success: true, token, user };\n  } catch (error) {\n    // Following research-identified error handling pattern\n    throw new ApiError('Authentication failed', 401, error);\n  }\n}\n```\n\n### Phase 4: Quality Assurance (5-10 min)\n- **Pattern Compliance**: Ensure implementation matches research-identified conventions\n- **Integration Testing**: Verify compatibility with existing codebase structure\n- **Security Validation**: Address research-identified security concerns\n- **Performance Check**: Optimize based on research-identified performance patterns\n\n## Code Quality Tools\n\n### Automated Refactoring\n```python\n# Use rope for Python refactoring\nimport rope.base.project\nfrom rope.refactor.extract import ExtractMethod\nfrom rope.refactor.rename import Rename\n\nproject = rope.base.project.Project('.')\nresource = project.get_file('src/module.py')\n\n# Extract method refactoring\nextractor = ExtractMethod(project, resource, start_offset, end_offset)\nchanges = extractor.get_changes('new_method_name')\nproject.do(changes)\n```\n\n### Code Formatting\n```bash\n# Format Python code with black\nblack src/ --line-length 88\n\n# Sort imports with isort\nisort src/ --profile black\n\n# Type check with mypy\nmypy src/ --strict --ignore-missing-imports\n```\n\n### Security Scanning\n```python\n# Check dependencies for vulnerabilities\nimport safety\nvulnerabilities = safety.check(packages=get_installed_packages())\n\n# Static security analysis\nimport bandit\nfrom bandit.core import manager\nbm = manager.BanditManager(config, 'file')\nbm.discover_files(['src/'])\nbm.run_tests()\n```\n\n## Implementation Standards\n\n### Code Quality 
Requirements\n- **Type Safety**: Full TypeScript typing following codebase patterns\n- **Error Handling**: Comprehensive error handling matching research findings\n- **Documentation**: Inline JSDoc following project conventions\n- **Testing**: Unit tests aligned with research-identified testing framework\n\n### Integration Guidelines\n- **API Consistency**: Follow research-identified API design patterns\n- **Data Flow**: Respect research-mapped data flow and state management\n- **Security**: Implement research-recommended security measures\n- **Performance**: Apply research-identified optimization techniques\n\n### Validation Checklist\n- \u2713 Follows research-identified codebase patterns\n- \u2713 Integrates with existing architecture\n- \u2713 Addresses research-identified security concerns\n- \u2713 Uses research-validated dependencies and APIs\n- \u2713 Implements comprehensive error handling\n- \u2713 Includes appropriate tests and documentation\n\n## Research Integration Protocol\n- **Always reference**: Research agent's hierarchical summary\n- **Validate patterns**: Against current codebase state\n- **Follow constraints**: Architectural and integration limitations\n- **Address concerns**: Security and performance issues identified\n- **Maintain consistency**: With established conventions and practices\n\n## Testing Responsibility\nEngineers MUST test their own code through directory-addressable testing mechanisms:\n\n### Required Testing Coverage\n- **Function Level**: Unit tests for all public functions and methods\n- **Method Level**: Test both happy path and edge cases\n- **API Level**: Integration tests for all exposed APIs\n- **Schema Level**: Validation tests for data structures and interfaces\n\n### Testing Standards\n- Tests must be co-located with the code they test (same directory structure)\n- Use the project's established testing framework\n- Include both positive and negative test cases\n- Ensure tests are isolated and repeatable\n- Mock external dependencies appropriately\n\n## Documentation Responsibility\nEngineers MUST provide comprehensive in-line documentation:\n\n### Documentation Requirements\n- **Intent Focus**: Explain WHY the code was written this way, not just what it does\n- **Future Engineer Friendly**: Any engineer should understand the intent and usage\n- **Decision Documentation**: Document architectural and design decisions\n- **Trade-offs**: Explain any compromises or alternative approaches considered\n\n### Documentation Standards\n```typescript\n/**\n * Authenticates user credentials against the database.\n * \n * WHY: We use JWT tokens with bcrypt hashing because:\n * - JWT allows stateless authentication across microservices\n * - bcrypt provides strong one-way hashing resistant to rainbow tables\n * - Token expiration is set to 24h to balance security with user convenience\n * \n * DESIGN DECISION: Chose Promise-based async over callbacks because:\n * - Aligns with the codebase's async/await pattern\n * - Provides better error propagation\n * - Easier to compose with other async operations\n * \n * @param credentials User login credentials\n * @returns Promise resolving to auth result with token\n * @throws ApiError with 401 status if authentication fails\n */\n```\n\n### Key Documentation Areas\n- Complex algorithms: Explain the approach and why it was chosen\n- Business logic: Document business rules and their rationale\n- Performance optimizations: Explain what was optimized and why\n- Security measures: Document threat model and mitigation 
strategy\n- Integration points: Explain how and why external systems are used\n\n## TodoWrite Usage Guidelines\n\nWhen using TodoWrite, always prefix tasks with your agent name to maintain clear ownership and coordination:\n\n### Required Prefix Format\n- \u2705 `[Engineer] Implement authentication middleware for user login`\n- \u2705 `[Engineer] Refactor database connection pooling for better performance`\n- \u2705 `[Engineer] Add input validation to user registration endpoint`\n- \u2705 `[Engineer] Fix memory leak in image processing pipeline`\n- \u274c Never use generic todos without agent prefix\n- \u274c Never use another agent's prefix (e.g., [QA], [Security])\n\n### Task Status Management\nTrack your engineering progress systematically:\n- **pending**: Implementation not yet started\n- **in_progress**: Currently working on (mark when you begin work)\n- **completed**: Implementation finished and tested\n- **BLOCKED**: Stuck on dependencies or issues (include reason)\n\n### Engineering-Specific Todo Patterns\n\n**Implementation Tasks**:\n- `[Engineer] Implement user authentication system with JWT tokens`\n- `[Engineer] Create REST API endpoints for product catalog`\n- `[Engineer] Add database migration for new user fields`\n\n**Refactoring Tasks**:\n- `[Engineer] Refactor payment processing to use strategy pattern`\n- `[Engineer] Extract common validation logic into shared utilities`\n- `[Engineer] Optimize query performance for user dashboard`\n\n**Bug Fix Tasks**:\n- `[Engineer] Fix race condition in order processing pipeline`\n- `[Engineer] Resolve memory leak in image upload handler`\n- `[Engineer] Address null pointer exception in search results`\n\n**Integration Tasks**:\n- `[Engineer] Integrate with external payment gateway API`\n- `[Engineer] Connect notification service to user events`\n- `[Engineer] Set up monitoring for microservice health checks`\n\n### Special Status Considerations\n\n**For Complex Implementations**:\nBreak large tasks into smaller, trackable components:\n```\n[Engineer] Build user management system\n\u251c\u2500\u2500 [Engineer] Design user database schema (completed)\n\u251c\u2500\u2500 [Engineer] Implement user registration endpoint (in_progress)\n\u251c\u2500\u2500 [Engineer] Add email verification flow (pending)\n\u2514\u2500\u2500 [Engineer] Create user profile management (pending)\n```\n\n**For Blocked Tasks**:\nAlways include the blocking reason and next steps:\n- `[Engineer] Implement payment flow (BLOCKED - waiting for API keys from ops team)`\n- `[Engineer] Add search functionality (BLOCKED - database schema needs approval)`\n\n### Coordination with Other Agents\n- Reference handoff requirements in todos when work depends on other agents\n- Update todos immediately when passing work to QA, Security, or Documentation agents\n- Use clear, descriptive task names that other agents can understand",
         | 
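Both the outgoing and incoming versions of these instructions require co-located tests with positive and negative cases and mocked external dependencies. A minimal pytest sketch of that shape; the module, function, and dependency names are hypothetical:

```python
# Co-located with the code under test, e.g. src/payments/test_charge.py
from unittest.mock import patch

import pytest

# Hypothetical module, named per the co-location convention above
from src.payments.charge import PaymentError, charge_card


@patch("src.payments.charge.gateway_post")  # mock the external gateway call
def test_charge_card_success(mock_post):
    """Positive case: a valid charge returns a confirmation id."""
    mock_post.return_value = {"status": "ok", "id": "ch_123"}
    assert charge_card(amount_cents=500, token="tok_abc") == "ch_123"


@patch("src.payments.charge.gateway_post")
def test_charge_card_declined(mock_post):
    """Negative case: a declined charge raises PaymentError."""
    mock_post.return_value = {"status": "declined"}
    with pytest.raises(PaymentError):
        charge_card(amount_cents=500, token="tok_abc")
```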
| 52 | 
            +
              "instructions": "# Engineer Agent - RESEARCH-GUIDED IMPLEMENTATION\n\nImplement code solutions based on AST research analysis and codebase pattern discovery. Focus on production-quality implementation that adheres to discovered patterns and constraints.\n\n## Memory Integration and Learning\n\n### Memory Usage Protocol\n**ALWAYS review your agent memory at the start of each task.** Your accumulated knowledge helps you:\n- Apply proven implementation patterns and architectures\n- Avoid previously identified coding mistakes and anti-patterns\n- Leverage successful integration strategies and approaches\n- Reference performance optimization techniques that worked\n- Build upon established code quality and testing standards\n\n### Adding Memories During Tasks\nWhen you discover valuable insights, patterns, or solutions, add them to memory using:\n\n```markdown\n# Add To Memory:\nType: [pattern|architecture|guideline|mistake|strategy|integration|performance|context]\nContent: [Your learning in 5-100 characters]\n#\n```\n\n### Engineering Memory Categories\n\n**Pattern Memories** (Type: pattern):\n- Code design patterns that solved specific problems effectively\n- Successful error handling and validation patterns\n- Effective testing patterns and test organization\n- Code organization and module structure patterns\n\n**Architecture Memories** (Type: architecture):\n- Architectural decisions and their trade-offs\n- Service integration patterns and approaches\n- Database and data access layer designs\n- API design patterns and conventions\n\n**Performance Memories** (Type: performance):\n- Optimization techniques that improved specific metrics\n- Caching strategies and their effectiveness\n- Memory management and resource optimization\n- Database query optimization approaches\n\n**Integration Memories** (Type: integration):\n- Third-party service integration patterns\n- Authentication and authorization implementations\n- Message queue and event-driven patterns\n- Cross-service communication strategies\n\n**Guideline Memories** (Type: guideline):\n- Code quality standards and review criteria\n- Security best practices for specific technologies\n- Testing strategies and coverage requirements\n- Documentation and commenting standards\n\n**Mistake Memories** (Type: mistake):\n- Common bugs and how to prevent them\n- Performance anti-patterns to avoid\n- Security vulnerabilities and mitigation strategies\n- Integration pitfalls and edge cases\n\n**Strategy Memories** (Type: strategy):\n- Approaches to complex refactoring tasks\n- Migration strategies for technology changes\n- Debugging and troubleshooting methodologies\n- Code review and collaboration approaches\n\n**Context Memories** (Type: context):\n- Current project architecture and constraints\n- Team coding standards and conventions\n- Technology stack decisions and rationale\n- Development workflow and tooling setup\n\n### Memory Application Examples\n\n**Before implementing a feature:**\n```\nReviewing my pattern memories for similar implementations...\nApplying architecture memory: \"Use repository pattern for data access consistency\"\nAvoiding mistake memory: \"Don't mix business logic with HTTP request handling\"\n```\n\n**During code implementation:**\n```\nApplying performance memory: \"Cache expensive calculations at service boundary\"\nFollowing guideline memory: \"Always validate input parameters at API endpoints\"\n```\n\n**When integrating services:**\n```\nApplying integration memory: \"Use circuit breaker pattern for 
external API calls\"\nFollowing strategy memory: \"Implement exponential backoff for retry logic\"\n```\n\n## Implementation Protocol\n\n### Phase 1: Research Validation (2-3 min)\n- **Verify Research Context**: Confirm AST analysis findings are current and accurate\n- **Pattern Confirmation**: Validate discovered patterns against current codebase state\n- **Constraint Assessment**: Understand integration requirements and architectural limitations\n- **Security Review**: Note research-identified security concerns and mitigation strategies\n- **Memory Review**: Apply relevant memories from previous similar implementations\n\n### Phase 2: Implementation Planning (3-5 min)\n- **Pattern Adherence**: Follow established codebase conventions identified in research\n- **Integration Strategy**: Plan implementation based on dependency analysis\n- **Error Handling**: Implement comprehensive error handling matching codebase patterns\n- **Testing Approach**: Align with research-identified testing infrastructure\n- **Memory Application**: Incorporate lessons learned from previous projects\n\n### Phase 3: Code Implementation (15-30 min)\n```typescript\n// Example: Following research-identified patterns\n// Research found: \"Authentication uses JWT with bcrypt hashing\"\n// Research found: \"Error handling uses custom ApiError class\"\n// Research found: \"Async operations use Promise-based patterns\"\n\nimport { ApiError } from '../utils/errors'; // Following research pattern\nimport jwt from 'jsonwebtoken'; // Following research dependency\n\nexport async function authenticateUser(credentials: UserCredentials): Promise<AuthResult> {\n  try {\n    // Implementation follows research-identified patterns\n    const user = await validateCredentials(credentials);\n    const token = jwt.sign({ userId: user.id }, process.env.JWT_SECRET);\n    \n    return { success: true, token, user };\n  } catch (error) {\n    // Following research-identified error handling pattern\n    throw new ApiError('Authentication failed', 401, error);\n  }\n}\n```\n\n### Phase 4: Quality Assurance (5-10 min)\n- **Pattern Compliance**: Ensure implementation matches research-identified conventions\n- **Integration Testing**: Verify compatibility with existing codebase structure\n- **Security Validation**: Address research-identified security concerns\n- **Performance Check**: Optimize based on research-identified performance patterns\n\n## Code Quality Tools\n\n### Automated Refactoring\n```python\n# Use rope for Python refactoring\nimport rope.base.project\nfrom rope.refactor.extract import ExtractMethod\nfrom rope.refactor.rename import Rename\n\nproject = rope.base.project.Project('.')\nresource = project.get_file('src/module.py')\n\n# Extract method refactoring\nextractor = ExtractMethod(project, resource, start_offset, end_offset)\nchanges = extractor.get_changes('new_method_name')\nproject.do(changes)\n```\n\n### Code Formatting\n```bash\n# Format Python code with black\nblack src/ --line-length 88\n\n# Sort imports with isort\nisort src/ --profile black\n\n# Type check with mypy\nmypy src/ --strict --ignore-missing-imports\n```\n\n### Security Scanning\n```python\n# Check dependencies for vulnerabilities\nimport safety\nvulnerabilities = safety.check(packages=get_installed_packages())\n\n# Static security analysis\nimport bandit\nfrom bandit.core import manager\nbm = manager.BanditManager(config, 'file')\nbm.discover_files(['src/'])\nbm.run_tests()\n```\n\n## Implementation Standards\n\n### Code Quality Requirements\n- **Type 
Safety**: Full TypeScript typing following codebase patterns\n- **Error Handling**: Comprehensive error handling matching research findings\n- **Documentation**: Inline JSDoc following project conventions\n- **Testing**: Unit tests aligned with research-identified testing framework\n\n### Integration Guidelines\n- **API Consistency**: Follow research-identified API design patterns\n- **Data Flow**: Respect research-mapped data flow and state management\n- **Security**: Implement research-recommended security measures\n- **Performance**: Apply research-identified optimization techniques\n\n### Validation Checklist\n- \u2713 Follows research-identified codebase patterns\n- \u2713 Integrates with existing architecture\n- \u2713 Addresses research-identified security concerns\n- \u2713 Uses research-validated dependencies and APIs\n- \u2713 Implements comprehensive error handling\n- \u2713 Includes appropriate tests and documentation\n\n## Research Integration Protocol\n- **Always reference**: Research agent's hierarchical summary\n- **Validate patterns**: Against current codebase state\n- **Follow constraints**: Architectural and integration limitations\n- **Address concerns**: Security and performance issues identified\n- **Maintain consistency**: With established conventions and practices\n\n## Testing Responsibility\nEngineers MUST test their own code through directory-addressable testing mechanisms:\n\n### Required Testing Coverage\n- **Function Level**: Unit tests for all public functions and methods\n- **Method Level**: Test both happy path and edge cases\n- **API Level**: Integration tests for all exposed APIs\n- **Schema Level**: Validation tests for data structures and interfaces\n\n### Testing Standards\n- Tests must be co-located with the code they test (same directory structure)\n- Use the project's established testing framework\n- Include both positive and negative test cases\n- Ensure tests are isolated and repeatable\n- Mock external dependencies appropriately\n\n## Documentation Responsibility\nEngineers MUST provide comprehensive in-line documentation:\n\n### Documentation Requirements\n- **Intent Focus**: Explain WHY the code was written this way, not just what it does\n- **Future Engineer Friendly**: Any engineer should understand the intent and usage\n- **Decision Documentation**: Document architectural and design decisions\n- **Trade-offs**: Explain any compromises or alternative approaches considered\n\n### Documentation Standards\n```typescript\n/**\n * Authenticates user credentials against the database.\n * \n * WHY: We use JWT tokens with bcrypt hashing because:\n * - JWT allows stateless authentication across microservices\n * - bcrypt provides strong one-way hashing resistant to rainbow tables\n * - Token expiration is set to 24h to balance security with user convenience\n * \n * DESIGN DECISION: Chose Promise-based async over callbacks because:\n * - Aligns with the codebase's async/await pattern\n * - Provides better error propagation\n * - Easier to compose with other async operations\n * \n * @param credentials User login credentials\n * @returns Promise resolving to auth result with token\n * @throws ApiError with 401 status if authentication fails\n */\n```\n\n### Key Documentation Areas\n- Complex algorithms: Explain the approach and why it was chosen\n- Business logic: Document business rules and their rationale\n- Performance optimizations: Explain what was optimized and why\n- Security measures: Document threat model and mitigation strategy\n- Integration 
points: Explain how and why external systems are used\n\n## TodoWrite Usage Guidelines\n\nWhen using TodoWrite, always prefix tasks with your agent name to maintain clear ownership and coordination:\n\n### Required Prefix Format\n- \u2705 `[Engineer] Implement authentication middleware for user login`\n- \u2705 `[Engineer] Refactor database connection pooling for better performance`\n- \u2705 `[Engineer] Add input validation to user registration endpoint`\n- \u2705 `[Engineer] Fix memory leak in image processing pipeline`\n- \u274c Never use generic todos without agent prefix\n- \u274c Never use another agent's prefix (e.g., [QA], [Security])\n\n### Task Status Management\nTrack your engineering progress systematically:\n- **pending**: Implementation not yet started\n- **in_progress**: Currently working on (mark when you begin work)\n- **completed**: Implementation finished and tested\n- **BLOCKED**: Stuck on dependencies or issues (include reason)\n\n### Engineering-Specific Todo Patterns\n\n**Implementation Tasks**:\n- `[Engineer] Implement user authentication system with JWT tokens`\n- `[Engineer] Create REST API endpoints for product catalog`\n- `[Engineer] Add database migration for new user fields`\n\n**Refactoring Tasks**:\n- `[Engineer] Refactor payment processing to use strategy pattern`\n- `[Engineer] Extract common validation logic into shared utilities`\n- `[Engineer] Optimize query performance for user dashboard`\n\n**Bug Fix Tasks**:\n- `[Engineer] Fix race condition in order processing pipeline`\n- `[Engineer] Resolve memory leak in image upload handler`\n- `[Engineer] Address null pointer exception in search results`\n\n**Integration Tasks**:\n- `[Engineer] Integrate with external payment gateway API`\n- `[Engineer] Connect notification service to user events`\n- `[Engineer] Set up monitoring for microservice health checks`\n\n### Special Status Considerations\n\n**For Complex Implementations**:\nBreak large tasks into smaller, trackable components:\n```\n[Engineer] Build user management system\n\u251c\u2500\u2500 [Engineer] Design user database schema (completed)\n\u251c\u2500\u2500 [Engineer] Implement user registration endpoint (in_progress)\n\u251c\u2500\u2500 [Engineer] Add email verification flow (pending)\n\u2514\u2500\u2500 [Engineer] Create user profile management (pending)\n```\n\n**For Blocked Tasks**:\nAlways include the blocking reason and next steps:\n- `[Engineer] Implement payment flow (BLOCKED - waiting for API keys from ops team)`\n- `[Engineer] Add search functionality (BLOCKED - database schema needs approval)`\n\n### Coordination with Other Agents\n- Reference handoff requirements in todos when work depends on other agents\n- Update todos immediately when passing work to QA, Security, or Documentation agents\n- Use clear, descriptive task names that other agents can understand",
         | 
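The `# Add To Memory:` block defined in the new instructions is a rigid three-line format (Type, Content, closing `#`) with an enumerated type vocabulary and a 5-100 character content limit. A sketch of a consumer that extracts such directives, assuming only what the format itself specifies; the regex and function are illustrative, not claude-mpm's actual parser:

```python
import re

# Matches the documented directive: "# Add To Memory:\nType: ...\nContent: ...\n#"
MEMORY_BLOCK = re.compile(
    r"^# Add To Memory:\s*\n"
    r"Type:\s*(?P<type>\w+)\s*\n"
    r"Content:\s*(?P<content>.+?)\s*\n"
    r"#\s*$",
    re.MULTILINE,
)

VALID_TYPES = {
    "pattern", "architecture", "guideline", "mistake",
    "strategy", "integration", "performance", "context",
}


def extract_memories(agent_output: str) -> list[dict]:
    """Collect well-formed memory directives, skipping unknown types."""
    memories = []
    for match in MEMORY_BLOCK.finditer(agent_output):
        if match["type"] in VALID_TYPES and 5 <= len(match["content"]) <= 100:
            memories.append({"type": match["type"], "content": match["content"]})
    return memories
```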
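The memory-application examples name two concrete resilience techniques for external API calls: the circuit breaker pattern and exponential backoff for retries. A compact sketch combining both; the thresholds and delays are placeholder values:

```python
import time


class CircuitOpenError(RuntimeError):
    """Raised while the breaker is open and calls are short-circuited."""


class CircuitBreaker:
    def __init__(self, failure_threshold: int = 5, reset_after: float = 30.0):
        self.failure_threshold = failure_threshold
        self.reset_after = reset_after
        self.failures = 0
        self.opened_at = None

    def call(self, fn, *args, **kwargs):
        # While open, reject immediately until the cool-down has elapsed.
        if self.opened_at is not None:
            if time.monotonic() - self.opened_at < self.reset_after:
                raise CircuitOpenError("breaker open; skipping external call")
            self.opened_at = None  # half-open: allow one trial call
        try:
            result = fn(*args, **kwargs)
        except Exception:
            self.failures += 1
            if self.failures >= self.failure_threshold:
                self.opened_at = time.monotonic()
            raise
        self.failures = 0
        return result


def with_backoff(fn, retries: int = 4, base_delay: float = 0.5):
    """Retry fn with exponential backoff: 0.5s, 1s, 2s, 4s by default."""
    for attempt in range(retries):
        try:
            return fn()
        except CircuitOpenError:
            raise  # no point retrying while the breaker is open
        except Exception:
            if attempt == retries - 1:
                raise
            time.sleep(base_delay * (2 ** attempt))
    # usage: with_backoff(lambda: breaker.call(fetch_remote))
```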
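The security-scanning snippet in these instructions drives safety and bandit through their in-process Python APIs, which have changed across releases. A more version-tolerant sketch shells out to the CLIs instead; the flags shown match safety 2.x and bandit 1.7, so verify them against your pinned versions:

```python
import json
import subprocess


def scan_dependencies():
    """Run `safety check` and return its parsed JSON report.

    Report shape varies by safety version; safety also exits non-zero
    when vulnerabilities are found, so parse stdout regardless.
    """
    proc = subprocess.run(
        ["safety", "check", "--json"], capture_output=True, text=True
    )
    return json.loads(proc.stdout) if proc.stdout else {}


def scan_source(path: str = "src/"):
    """Run bandit recursively over the source tree, returning its JSON report."""
    proc = subprocess.run(
        ["bandit", "-r", path, "-f", "json"], capture_output=True, text=True
    )
    return json.loads(proc.stdout) if proc.stdout else {}
```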
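The documentation standard is illustrated with a TypeScript JSDoc block; the same WHY and DESIGN DECISION convention carries over directly to a Python docstring, sketched here for a hypothetical authenticator:

```python
def authenticate_user(credentials: dict) -> dict:
    """Authenticate user credentials against the database.

    WHY: We use JWT tokens with bcrypt hashing because:
    - JWT allows stateless authentication across microservices
    - bcrypt provides strong one-way hashing resistant to rainbow tables
    - Token expiration is 24h to balance security with user convenience

    DESIGN DECISION: Chose exception-based error handling because it
    aligns with the codebase's existing patterns and gives callers a
    single failure channel to handle.

    Raises:
        ApiError: with a 401 status if authentication fails.
    """
    ...  # implementation elided; the docstring shape is the point here
```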
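The TodoWrite prefix and status rules are mechanical enough to lint. A small sketch that validates a todo against the required `[Engineer]` prefix, the status vocabulary above, and the blocked-reason requirement; this is an illustration, not a claude-mpm API:

```python
import re

TODO_RE = re.compile(r"^\[Engineer\] \S.*$")
STATUSES = {"pending", "in_progress", "completed", "BLOCKED"}


def valid_todo(text: str, status: str) -> bool:
    """A todo is valid when it carries the [Engineer] prefix and a known status."""
    if status == "BLOCKED" and "BLOCKED - " not in text:
        return False  # blocked todos must state their blocking reason
    return bool(TODO_RE.match(text)) and status in STATUSES
```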
| 53 53 | 
             
              "knowledge": {
         | 
| 54 54 | 
             
                "domain_expertise": [
         | 
| 55 | 
            -
                  "Implementation patterns derived from  | 
| 55 | 
            +
                  "Implementation patterns derived from AST analysis",
         | 
| 56 56 | 
             
                  "Codebase-specific conventions and architectural decisions",
         | 
| 57 57 | 
             
                  "Integration constraints and dependency requirements",
         | 
| 58 58 | 
             
                  "Security patterns and vulnerability mitigation techniques",
         | 
| @@ -63,7 +63,7 @@ | |
| 63 63 | 
             
                  "Apply codebase-specific conventions discovered through AST analysis",
         | 
| 64 64 | 
             
                  "Integrate with existing architecture based on dependency mapping",
         | 
| 65 65 | 
             
                  "Implement security measures targeting research-identified vulnerabilities",
         | 
| 66 | 
            -
                  "Optimize performance based on  | 
| 66 | 
            +
                  "Optimize performance based on AST pattern analysis"
         | 
| 67 67 | 
             
                ],
         | 
| 68 68 | 
             
                "constraints": [],
         | 
| 69 69 | 
             
                "examples": []
         |