claude-mpm 3.7.1__py3-none-any.whl → 3.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52) hide show
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/INSTRUCTIONS.md +18 -0
  3. claude_mpm/agents/frontmatter_validator.py +116 -17
  4. claude_mpm/agents/schema/agent_schema.json +1 -1
  5. claude_mpm/{dashboard → agents}/templates/.claude-mpm/memories/engineer_agent.md +1 -1
  6. claude_mpm/{dashboard/templates/.claude-mpm/memories/version_control_agent.md → agents/templates/.claude-mpm/memories/qa_agent.md} +2 -2
  7. claude_mpm/agents/templates/.claude-mpm/memories/research_agent.md +39 -0
  8. claude_mpm/agents/templates/code_analyzer.json +34 -12
  9. claude_mpm/agents/templates/data_engineer.json +5 -8
  10. claude_mpm/agents/templates/documentation.json +2 -2
  11. claude_mpm/agents/templates/engineer.json +6 -6
  12. claude_mpm/agents/templates/ops.json +3 -8
  13. claude_mpm/agents/templates/qa.json +2 -3
  14. claude_mpm/agents/templates/research.json +12 -9
  15. claude_mpm/agents/templates/security.json +4 -7
  16. claude_mpm/agents/templates/ticketing.json +161 -0
  17. claude_mpm/agents/templates/version_control.json +3 -3
  18. claude_mpm/agents/templates/web_qa.json +214 -0
  19. claude_mpm/agents/templates/web_ui.json +176 -0
  20. claude_mpm/cli/commands/agents.py +118 -1
  21. claude_mpm/cli/parser.py +11 -0
  22. claude_mpm/cli/ticket_cli.py +31 -0
  23. claude_mpm/core/framework_loader.py +102 -49
  24. claude_mpm/dashboard/static/js/components/file-tool-tracker.js +46 -2
  25. claude_mpm/dashboard/templates/index.html +5 -5
  26. claude_mpm/services/agents/deployment/agent_deployment.py +9 -1
  27. claude_mpm/services/agents/deployment/async_agent_deployment.py +174 -13
  28. claude_mpm/services/agents/management/agent_capabilities_generator.py +21 -11
  29. claude_mpm/services/ticket_manager.py +207 -44
  30. claude_mpm/utils/agent_dependency_loader.py +66 -15
  31. claude_mpm/utils/robust_installer.py +587 -0
  32. {claude_mpm-3.7.1.dist-info → claude_mpm-3.7.8.dist-info}/METADATA +17 -21
  33. {claude_mpm-3.7.1.dist-info → claude_mpm-3.7.8.dist-info}/RECORD +37 -46
  34. claude_mpm/.claude-mpm/logs/hooks_20250728.log +0 -10
  35. claude_mpm/agents/agent-template.yaml +0 -83
  36. claude_mpm/agents/templates/test_integration.json +0 -113
  37. claude_mpm/cli/README.md +0 -108
  38. claude_mpm/cli_module/refactoring_guide.md +0 -253
  39. claude_mpm/config/async_logging_config.yaml +0 -145
  40. claude_mpm/core/.claude-mpm/logs/hooks_20250730.log +0 -34
  41. claude_mpm/dashboard/.claude-mpm/memories/README.md +0 -36
  42. claude_mpm/dashboard/README.md +0 -121
  43. claude_mpm/dashboard/static/js/dashboard.js.backup +0 -1973
  44. claude_mpm/dashboard/templates/.claude-mpm/memories/README.md +0 -36
  45. claude_mpm/hooks/README.md +0 -96
  46. claude_mpm/schemas/agent_schema.json +0 -435
  47. claude_mpm/services/framework_claude_md_generator/README.md +0 -92
  48. claude_mpm/services/version_control/VERSION +0 -1
  49. {claude_mpm-3.7.1.dist-info → claude_mpm-3.7.8.dist-info}/WHEEL +0 -0
  50. {claude_mpm-3.7.1.dist-info → claude_mpm-3.7.8.dist-info}/entry_points.txt +0 -0
  51. {claude_mpm-3.7.1.dist-info → claude_mpm-3.7.8.dist-info}/licenses/LICENSE +0 -0
  52. {claude_mpm-3.7.1.dist-info → claude_mpm-3.7.8.dist-info}/top_level.txt +0 -0
claude_mpm/VERSION CHANGED
@@ -1 +1 @@
1
- 3.7.1
1
+ 3.7.8
@@ -141,6 +141,23 @@ Delegate to Research when:
141
141
  - Architecture decisions needed
142
142
  - Domain knowledge required
143
143
 
144
+ ### Ticketing Agent Scenarios
145
+
146
+ **ALWAYS delegate to Ticketing Agent when user mentions:**
147
+ - "ticket", "tickets", "ticketing"
148
+ - "epic", "epics"
149
+ - "issue", "issues"
150
+ - "task tracking", "task management"
151
+ - "project documentation"
152
+ - "work breakdown"
153
+ - "user stories"
154
+
155
+ The Ticketing Agent specializes in:
156
+ - Creating and managing epics, issues, and tasks
157
+ - Generating structured project documentation
158
+ - Breaking down work into manageable pieces
159
+ - Tracking project progress and dependencies
160
+
144
161
  ## Context-Aware Agent Selection
145
162
 
146
163
  - **PM questions** → Answer directly (only exception)
@@ -153,6 +170,7 @@ Delegate to Research when:
153
170
  - **Testing/quality** → QA Agent
154
171
  - **Version control** → Version Control Agent
155
172
  - **Integration testing** → Test Integration Agent
173
+ - **Ticket/issue management** → Ticketing Agent (when user mentions "ticket", "epic", "issue", or "task tracking")
156
174
 
157
175
  ## Error Handling Protocol
158
176
 
@@ -34,6 +34,7 @@ class ValidationResult:
34
34
  warnings: List[str]
35
35
  corrections: List[str]
36
36
  corrected_frontmatter: Optional[Dict[str, Any]] = None
37
+ field_corrections: Optional[Dict[str, Any]] = None # Specific field-level corrections
37
38
 
38
39
 
39
40
  class FrontmatterValidator:
@@ -109,6 +110,7 @@ class FrontmatterValidator:
109
110
  def __init__(self):
110
111
  """Initialize the validator with schema if available."""
111
112
  self.schema = self._load_schema()
113
+ self.all_valid_fields = self._extract_valid_fields()
112
114
 
113
115
  def _load_schema(self) -> Optional[Dict[str, Any]]:
114
116
  """Load the frontmatter schema from JSON file."""
@@ -121,6 +123,17 @@ class FrontmatterValidator:
121
123
  logger.warning(f"Failed to load frontmatter schema: {e}")
122
124
  return None
123
125
 
126
+ def _extract_valid_fields(self) -> set:
127
+ """Extract all valid field names from the schema."""
128
+ if self.schema and 'properties' in self.schema:
129
+ return set(self.schema['properties'].keys())
130
+ # Fallback to known fields if schema not available
131
+ return {
132
+ "name", "description", "version", "base_version", "author",
133
+ "tools", "model", "tags", "category", "max_tokens", "temperature",
134
+ "resource_tier", "dependencies", "capabilities", "color"
135
+ }
136
+
124
137
  def validate_and_correct(self, frontmatter: Dict[str, Any]) -> ValidationResult:
125
138
  """
126
139
  Validate and automatically correct frontmatter.
@@ -135,9 +148,10 @@ class FrontmatterValidator:
135
148
  warnings = []
136
149
  corrections = []
137
150
  corrected = frontmatter.copy()
151
+ field_corrections = {} # Track only the fields that actually need correction
138
152
 
139
- # Required fields check
140
- required_fields = ["name", "description", "version", "model"]
153
+ # Required fields check (from schema)
154
+ required_fields = self.schema.get('required', ["name", "description", "version", "model"]) if self.schema else ["name", "description", "version", "model"]
141
155
  for field in required_fields:
142
156
  if field not in corrected:
143
157
  errors.append(f"Missing required field: {field}")
@@ -153,6 +167,7 @@ class FrontmatterValidator:
153
167
  fixed_name = re.sub(r"[^a-z0-9_]", "", fixed_name)
154
168
  if fixed_name and fixed_name[0].isalpha():
155
169
  corrected["name"] = fixed_name
170
+ field_corrections["name"] = fixed_name
156
171
  corrections.append(f"Corrected name from '{name}' to '{fixed_name}'")
157
172
  else:
158
173
  errors.append(f"Invalid name format: {name}")
@@ -165,6 +180,7 @@ class FrontmatterValidator:
165
180
  if isinstance(model, (int, float)):
166
181
  model = str(model)
167
182
  corrected["model"] = model
183
+ field_corrections["model"] = model
168
184
  corrections.append(f"Converted model from number to string: {model}")
169
185
 
170
186
  if not isinstance(model, str):
@@ -173,6 +189,7 @@ class FrontmatterValidator:
173
189
  normalized_model = self._normalize_model(model)
174
190
  if normalized_model != model:
175
191
  corrected["model"] = normalized_model
192
+ field_corrections["model"] = normalized_model
176
193
  corrections.append(f"Normalized model from '{model}' to '{normalized_model}'")
177
194
 
178
195
  if normalized_model not in self.VALID_MODELS:
@@ -184,6 +201,7 @@ class FrontmatterValidator:
184
201
  corrected_tools, tool_corrections = self._correct_tools(tools)
185
202
  if tool_corrections:
186
203
  corrected["tools"] = corrected_tools
204
+ field_corrections["tools"] = corrected_tools
187
205
  corrections.extend(tool_corrections)
188
206
 
189
207
  # Validate tool names
@@ -195,6 +213,8 @@ class FrontmatterValidator:
195
213
  if corrected_tool:
196
214
  idx = corrected_tools.index(tool)
197
215
  corrected_tools[idx] = corrected_tool
216
+ corrected["tools"] = corrected_tools
217
+ field_corrections["tools"] = corrected_tools
198
218
  corrections.append(f"Corrected tool '{tool}' to '{corrected_tool}'")
199
219
  else:
200
220
  invalid_tools.append(tool)
@@ -214,10 +234,12 @@ class FrontmatterValidator:
214
234
  if re.match(r"^\d+\.\d+$", version):
215
235
  fixed_version = f"{version}.0"
216
236
  corrected[field] = fixed_version
237
+ field_corrections[field] = fixed_version
217
238
  corrections.append(f"Fixed {field} from '{version}' to '{fixed_version}'")
218
239
  elif re.match(r"^v?\d+\.\d+\.\d+$", version):
219
240
  fixed_version = version.lstrip("v")
220
241
  corrected[field] = fixed_version
242
+ field_corrections[field] = fixed_version
221
243
  corrections.append(f"Fixed {field} from '{version}' to '{fixed_version}'")
222
244
  else:
223
245
  errors.append(f"Invalid {field} format: {version}")
@@ -243,6 +265,44 @@ class FrontmatterValidator:
243
265
  if corrected["resource_tier"] not in valid_tiers:
244
266
  warnings.append(f"Invalid resource_tier: {corrected['resource_tier']}")
245
267
 
268
+ # Validate color field
269
+ if "color" in corrected:
270
+ color = corrected["color"]
271
+ if not isinstance(color, str):
272
+ errors.append(f"Field 'color' must be a string, got {type(color).__name__}")
273
+ # Color validation could be expanded to check for valid color names/hex codes
274
+
275
+ # Validate author field
276
+ if "author" in corrected:
277
+ author = corrected["author"]
278
+ if not isinstance(author, str):
279
+ errors.append(f"Field 'author' must be a string, got {type(author).__name__}")
280
+ elif len(author) > 100:
281
+ warnings.append(f"Author field too long ({len(author)} chars, maximum 100)")
282
+
283
+ # Validate tags field
284
+ if "tags" in corrected:
285
+ tags = corrected["tags"]
286
+ if not isinstance(tags, list):
287
+ errors.append(f"Field 'tags' must be a list, got {type(tags).__name__}")
288
+ else:
289
+ for tag in tags:
290
+ if not isinstance(tag, str):
291
+ errors.append(f"All tags must be strings, found {type(tag).__name__}")
292
+ elif not re.match(r"^[a-z][a-z0-9-]*$", tag):
293
+ warnings.append(f"Tag '{tag}' doesn't match recommended pattern (lowercase, alphanumeric with hyphens)")
294
+
295
+ # Validate numeric fields
296
+ for field_name, (min_val, max_val) in [("max_tokens", (1000, 200000)), ("temperature", (0, 1))]:
297
+ if field_name in corrected:
298
+ value = corrected[field_name]
299
+ if field_name == "temperature" and not isinstance(value, (int, float)):
300
+ errors.append(f"Field '{field_name}' must be a number, got {type(value).__name__}")
301
+ elif field_name == "max_tokens" and not isinstance(value, int):
302
+ errors.append(f"Field '{field_name}' must be an integer, got {type(value).__name__}")
303
+ elif isinstance(value, (int, float)) and not (min_val <= value <= max_val):
304
+ warnings.append(f"Field '{field_name}' value {value} outside recommended range [{min_val}, {max_val}]")
305
+
246
306
  # Determine if valid
247
307
  is_valid = len(errors) == 0
248
308
 
@@ -251,7 +311,8 @@ class FrontmatterValidator:
251
311
  errors=errors,
252
312
  warnings=warnings,
253
313
  corrections=corrections,
254
- corrected_frontmatter=corrected if corrections else None
314
+ corrected_frontmatter=corrected if corrections else None,
315
+ field_corrections=field_corrections if field_corrections else None
255
316
  )
256
317
 
257
318
  def _normalize_model(self, model: str) -> str:
@@ -416,7 +477,7 @@ class FrontmatterValidator:
416
477
  """
417
478
  result = self.validate_file(file_path)
418
479
 
419
- if result.corrected_frontmatter and not dry_run:
480
+ if result.field_corrections and not dry_run:
420
481
  try:
421
482
  with open(file_path, 'r') as f:
422
483
  content = f.read()
@@ -428,21 +489,59 @@ class FrontmatterValidator:
428
489
  end_marker = content.find("\n---\r\n", 4)
429
490
 
430
491
  if end_marker != -1:
431
- # Reconstruct file with corrected frontmatter
432
- new_frontmatter = yaml.dump(
433
- result.corrected_frontmatter,
434
- default_flow_style=False,
435
- sort_keys=False
492
+ # Apply field-level corrections to preserve structure
493
+ frontmatter_content = content[4:end_marker]
494
+ corrected_content = self._apply_field_corrections(
495
+ frontmatter_content, result.field_corrections
436
496
  )
437
- new_content = f"---\n{new_frontmatter}---\n{content[end_marker + 5:]}"
438
-
439
- with open(file_path, 'w') as f:
440
- f.write(new_content)
441
497
 
442
- logger.info(f"Corrected frontmatter in {file_path}")
443
- for correction in result.corrections:
444
- logger.info(f" - {correction}")
498
+ if corrected_content != frontmatter_content:
499
+ new_content = f"---\n{corrected_content}\n---\n{content[end_marker + 5:]}"
500
+
501
+ with open(file_path, 'w') as f:
502
+ f.write(new_content)
503
+
504
+ logger.info(f"Corrected frontmatter in {file_path}")
505
+ for correction in result.corrections:
506
+ logger.info(f" - {correction}")
445
507
  except Exception as e:
446
508
  logger.error(f"Failed to write corrections to {file_path}: {e}")
447
509
 
448
- return result
510
+ return result
511
+
512
+ def _apply_field_corrections(self, frontmatter_content: str, field_corrections: Dict[str, Any]) -> str:
513
+ """
514
+ Apply field-level corrections while preserving structure and other fields.
515
+
516
+ Args:
517
+ frontmatter_content: Original YAML frontmatter content
518
+ field_corrections: Dict of field corrections to apply
519
+
520
+ Returns:
521
+ Corrected frontmatter content
522
+ """
523
+ lines = frontmatter_content.strip().split('\n')
524
+ corrected_lines = []
525
+
526
+ for line in lines:
527
+ # Check if this line contains a field we need to correct
528
+ if ':' in line:
529
+ field_name = line.split(':')[0].strip()
530
+ if field_name in field_corrections:
531
+ # Replace the field value while preserving structure
532
+ corrected_value = field_corrections[field_name]
533
+ if isinstance(corrected_value, list):
534
+ # Handle list fields like tools
535
+ if field_name == "tools" and isinstance(corrected_value, list):
536
+ # Format as comma-separated string to preserve existing format
537
+ corrected_lines.append(f"{field_name}: {','.join(corrected_value)}")
538
+ else:
539
+ corrected_lines.append(f"{field_name}: {corrected_value}")
540
+ else:
541
+ corrected_lines.append(f"{field_name}: {corrected_value}")
542
+ continue
543
+
544
+ # Keep the original line if no correction needed
545
+ corrected_lines.append(line)
546
+
547
+ return '\n'.join(corrected_lines)
@@ -14,7 +14,7 @@
14
14
  "agent_id": {
15
15
  "type": "string",
16
16
  "description": "Unique identifier for the agent",
17
- "pattern": "^[a-z0-9_]+$"
17
+ "pattern": "^[a-z][a-z0-9_-]*$"
18
18
  },
19
19
  "agent_version": {
20
20
  "type": "string",
@@ -1,7 +1,7 @@
1
1
  # Engineer Agent Memory - templates
2
2
 
3
3
  <!-- MEMORY LIMITS: 8KB max | 10 sections max | 15 items per section -->
4
- <!-- Last Updated: 2025-08-07 18:26:34 | Auto-updated by: engineer -->
4
+ <!-- Last Updated: 2025-08-13 14:34:34 | Auto-updated by: engineer -->
5
5
 
6
6
  ## Project Context
7
7
  templates: mixed standard application
@@ -1,7 +1,7 @@
1
- # Version Control Agent Memory - templates
1
+ # Qa Agent Memory - templates
2
2
 
3
3
  <!-- MEMORY LIMITS: 8KB max | 10 sections max | 15 items per section -->
4
- <!-- Last Updated: 2025-08-07 18:28:51 | Auto-updated by: version_control -->
4
+ <!-- Last Updated: 2025-08-13 14:37:34 | Auto-updated by: qa -->
5
5
 
6
6
  ## Project Context
7
7
  templates: mixed standard application
@@ -0,0 +1,39 @@
1
+ # Research Agent Memory - templates
2
+
3
+ <!-- MEMORY LIMITS: 16KB max | 10 sections max | 15 items per section -->
4
+ <!-- Last Updated: 2025-08-13 14:29:28 | Auto-updated by: research -->
5
+
6
+ ## Project Context
7
+ templates: mixed standard application
8
+
9
+ ## Project Architecture
10
+ - Standard Application with mixed implementation
11
+
12
+ ## Coding Patterns Learned
13
+ <!-- Items will be added as knowledge accumulates -->
14
+
15
+ ## Implementation Guidelines
16
+ <!-- Items will be added as knowledge accumulates -->
17
+
18
+ ## Domain-Specific Knowledge
19
+ <!-- Agent-specific knowledge for templates domain -->
20
+ - Key project terms: templates
21
+ - Focus on code analysis, pattern discovery, and architectural insights
22
+
23
+ ## Effective Strategies
24
+ <!-- Successful approaches discovered through experience -->
25
+
26
+ ## Common Mistakes to Avoid
27
+ <!-- Items will be added as knowledge accumulates -->
28
+
29
+ ## Integration Points
30
+ <!-- Items will be added as knowledge accumulates -->
31
+
32
+ ## Performance Considerations
33
+ <!-- Items will be added as knowledge accumulates -->
34
+
35
+ ## Current Technical Context
36
+ <!-- Items will be added as knowledge accumulates -->
37
+
38
+ ## Recent Learnings
39
+ <!-- Most recent discoveries and insights -->
@@ -1,17 +1,18 @@
1
1
  {
2
2
  "schema_version": "1.2.0",
3
- "agent_id": "code_analyzer",
4
- "agent_version": "2.0.0",
3
+ "agent_id": "code-analyzer",
4
+ "agent_version": "2.1.0",
5
5
  "agent_type": "research",
6
6
  "metadata": {
7
7
  "name": "Code Analysis Agent",
8
- "description": "Advanced code analysis using tree-sitter for AST parsing, pattern detection, and improvement recommendations",
8
+ "description": "Advanced multi-language code analysis using Python AST for Python files and individual tree-sitter packages for other languages (Python 3.13 compatible)",
9
9
  "created_at": "2025-08-12T00:00:00.000000Z",
10
- "updated_at": "2025-08-12T00:00:00.000000Z",
10
+ "updated_at": "2025-08-13T00:00:00.000000Z",
11
11
  "tags": [
12
12
  "code-analysis",
13
- "tree-sitter",
14
13
  "ast-analysis",
14
+ "tree-sitter",
15
+ "multi-language",
15
16
  "code-quality",
16
17
  "refactoring",
17
18
  "pattern-detection"
@@ -40,36 +41,57 @@
40
41
  },
41
42
  "knowledge": {
42
43
  "domain_expertise": [
43
- "Tree-sitter AST parsing and analysis for multiple languages",
44
+ "Python AST parsing using native ast module",
45
+ "Individual tree-sitter packages for multi-language support",
46
+ "Dynamic package installation for language support",
44
47
  "Code quality metrics and complexity analysis",
45
48
  "Design pattern recognition and anti-pattern detection",
46
49
  "Performance bottleneck identification through static analysis",
47
50
  "Security vulnerability pattern detection",
48
51
  "Refactoring opportunity identification",
49
- "Code smell detection and remediation strategies"
52
+ "Code smell detection and remediation strategies",
53
+ "Python 3.13 compatibility strategies"
50
54
  ],
51
55
  "best_practices": [
56
+ "Use Python's native AST for all Python files",
57
+ "Dynamically install tree-sitter language packages as needed",
52
58
  "Parse code into AST before making structural recommendations",
53
- "Use tree-sitter queries for precise pattern matching",
54
59
  "Analyze cyclomatic complexity and cognitive complexity",
55
60
  "Identify dead code and unused dependencies",
56
61
  "Check for SOLID principle violations",
57
62
  "Detect common security vulnerabilities (OWASP Top 10)",
58
63
  "Measure code duplication and suggest DRY improvements",
59
- "Analyze dependency coupling and cohesion metrics"
64
+ "Analyze dependency coupling and cohesion metrics",
65
+ "Handle missing packages gracefully with automatic installation"
60
66
  ],
61
67
  "constraints": [
62
68
  "Focus on static analysis without execution",
63
69
  "Provide actionable, specific recommendations",
64
70
  "Include code examples for suggested improvements",
65
71
  "Prioritize findings by impact and effort",
66
- "Consider language-specific idioms and conventions"
72
+ "Consider language-specific idioms and conventions",
73
+ "Always use native AST for Python files",
74
+ "Install individual tree-sitter packages on-demand"
67
75
  ]
68
76
  },
69
77
  "dependencies": {
70
78
  "python": [
71
79
  "tree-sitter>=0.21.0",
72
- "tree-sitter-language-pack>=0.8.0"
80
+ "tree-sitter-python>=0.21.0",
81
+ "tree-sitter-javascript>=0.21.0",
82
+ "tree-sitter-typescript>=0.21.0",
83
+ "tree-sitter-go>=0.21.0",
84
+ "tree-sitter-rust>=0.21.0",
85
+ "tree-sitter-java>=0.21.0",
86
+ "tree-sitter-cpp>=0.21.0",
87
+ "tree-sitter-c>=0.21.0",
88
+ "tree-sitter-ruby>=0.21.0",
89
+ "tree-sitter-php>=0.21.0",
90
+ "astroid>=3.0.0",
91
+ "rope>=1.11.0",
92
+ "libcst>=1.1.0",
93
+ "radon>=6.0.0",
94
+ "pygments>=2.17.0"
73
95
  ],
74
96
  "system": [
75
97
  "python3",
@@ -77,5 +99,5 @@
77
99
  ],
78
100
  "optional": false
79
101
  },
80
- "instructions": "# Code Analysis Agent - AST-POWERED ANALYSIS\n\n## PRIMARY DIRECTIVE: USE AST-BASED ANALYSIS FOR CODE STRUCTURE\n\n**MANDATORY**: You MUST use AST parsing for code structure analysis. Create analysis scripts on-the-fly using your Bash tool to:\n1. **For Python**: Use Python's native `ast` module (complete AST access, no dependencies)\n2. **For other languages or cross-language**: Use tree-sitter or tree-sitter-languages\n3. Extract structural patterns and complexity metrics via AST traversal\n4. Identify code quality issues through node analysis\n5. Generate actionable recommendations based on AST findings\n\n## Efficiency Guidelines\n\n1. **Start with high-level metrics** before deep analysis\n2. **Use Python's ast module** for Python codebases (native, no dependencies, equally powerful for Python-specific analysis)\n3. **Use tree-sitter** for multi-language projects or when you need consistent cross-language AST analysis\n4. **Create reusable analysis scripts** in /tmp/ for multiple passes\n5. **Batch similar analyses** to reduce script creation overhead\n6. **Focus on actionable issues** - skip theoretical problems without clear fixes\n\n## Critical Analysis Patterns to Detect\n\n### 1. Code Quality Issues\n- **God Objects/Functions**: Classes >500 lines, functions >100 lines, complexity >10\n- **Test Doubles Outside Test Files**: Detect Mock, Stub, Fake classes in production code\n- **Circular Dependencies**: Build dependency graphs and detect cycles using DFS\n- **Swallowed Exceptions**: Find bare except, empty handlers, broad catches without re-raise\n- **High Fan-out**: Modules with >40 imports indicate architectural issues\n- **Code Duplication**: Identify structurally similar code blocks via AST hashing\n\n### 2. Security Vulnerabilities\n- Hardcoded secrets (passwords, API keys, tokens)\n- SQL injection risks (string concatenation in queries)\n- Command injection (os.system, shell=True)\n- Unsafe deserialization (pickle, yaml.load)\n- Path traversal vulnerabilities\n\n### 3. Performance Bottlenecks\n- Synchronous I/O in async contexts\n- Nested loops with O(n²) or worse complexity\n- String concatenation in loops\n- Large functions (>100 lines)\n- Memory leaks from unclosed resources\n\n### 4. Monorepo Configuration Issues\n- Dependency version inconsistencies across packages\n- Inconsistent script naming conventions\n- Misaligned package configurations\n- Conflicting tool configurations\n\n## Tree-Sitter Usage Guidelines\n\n### Installation\n```bash\n# Install tree-sitter and language parsers\npip install tree-sitter tree-sitter-languages\n\n# For Node.js projects\nnpm install -g tree-sitter-cli\n```\n\n### AST Analysis Approach\n1. **Parse files into AST** using tree-sitter-languages\n2. **Traverse AST nodes** to collect metrics and patterns\n3. **Apply pattern matching** using tree-sitter queries or AST node inspection\n4. **Calculate metrics** like complexity, coupling, cohesion\n5. **Generate report** with prioritized findings\n\n### Example Tree-Sitter Query Structure\n```scheme\n; Find function definitions\n(function_definition\n name: (identifier) @function.name)\n\n; Find class methods\n(class_definition\n name: (identifier) @class.name\n body: (block\n (function_definition) @method))\n```\n\n## Analysis Workflow\n\n### Phase 1: Discovery\n- Use Glob to find relevant source files\n- Identify languages and file structures\n- Map out module dependencies\n\n### Phase 2: AST Analysis\n- Create Python scripts using ast module for Python code\n- Use tree-sitter-languages for multi-language support\n- Extract complexity metrics, patterns, and structures\n\n### Phase 3: Pattern Detection\n- Write targeted grep patterns for security issues\n- Build dependency graphs for circular reference detection\n- Create AST-based duplicate detection algorithms\n\n### Phase 4: Report Generation\n- Prioritize findings by severity and impact\n- Provide specific file:line references\n- Include remediation examples\n- Generate actionable recommendations\n\n## Memory Integration\n\n**ALWAYS** check agent memory for:\n- Previously identified patterns in this codebase\n- Successful analysis strategies\n- Project-specific conventions and standards\n\n**ADD** to memory:\n- New pattern discoveries\n- Effective tree-sitter queries\n- Project-specific anti-patterns\n\n## Key Thresholds\n\n- **Complexity**: >10 is high, >20 is critical\n- **Function Length**: >50 lines is long, >100 is critical\n- **Class Size**: >300 lines needs refactoring, >500 is critical\n- **Import Count**: >20 is high coupling, >40 is critical\n- **Duplication**: >5% needs attention, >10% is critical\n\n## Output Format\n\n```markdown\n# Code Analysis Report\n\n## Summary\n- Files analyzed: X\n- Critical issues: X\n- High priority: X\n- Overall health: [A-F grade]\n\n## Critical Issues (Immediate Action Required)\n1. [Issue Type]: file:line\n - Impact: [Description]\n - Fix: [Specific remediation]\n\n## High Priority Issues\n[Issues that should be addressed soon]\n\n## Metrics\n- Avg Complexity: X.X (Max: X in function_name)\n- Code Duplication: X%\n- Security Issues: X\n- Performance Bottlenecks: X\n```\n\n## Tool Usage Rules\n\n1. **ALWAYS** use AST-based analysis (Python ast or tree-sitter) - create scripts as needed\n2. **NEVER** rely on regex alone for structural analysis\n3. **CREATE** analysis scripts dynamically based on the specific needs\n4. **COMBINE** multiple analysis techniques for comprehensive coverage\n5. **PRIORITIZE** findings by real impact, not just count\n\n## Response Guidelines\n\n- **Summary**: Concise overview of findings and health score\n- **Approach**: Explain tree-sitter queries and analysis methods used\n- **Remember**: Store universal patterns for future use (or null)\n - Format: [\"Pattern 1\", \"Pattern 2\"] or null"
102
+ "instructions": "# Code Analysis Agent - ADVANCED CODE ANALYSIS\n\n## PRIMARY DIRECTIVE: PYTHON AST FIRST, TREE-SITTER FOR OTHER LANGUAGES\n\n**MANDATORY**: You MUST prioritize Python's native AST for Python files, and use individual tree-sitter packages for other languages. Create analysis scripts on-the-fly using your Bash tool to:\n1. **For Python files (.py)**: ALWAYS use Python's native `ast` module as the primary tool\n2. **For Python deep analysis**: Use `astroid` for type inference and advanced analysis\n3. **For Python refactoring**: Use `rope` for automated refactoring suggestions\n4. **For concrete syntax trees**: Use `libcst` for preserving formatting and comments\n5. **For complexity metrics**: Use `radon` for cyclomatic complexity and maintainability\n6. **For other languages**: Use individual tree-sitter packages with dynamic installation\n\n## Individual Tree-Sitter Packages (Python 3.13 Compatible)\n\nFor non-Python languages, use individual tree-sitter packages that support Python 3.13:\n- **JavaScript/TypeScript**: tree-sitter-javascript, tree-sitter-typescript\n- **Go**: tree-sitter-go\n- **Rust**: tree-sitter-rust\n- **Java**: tree-sitter-java\n- **C/C++**: tree-sitter-c, tree-sitter-cpp\n- **Ruby**: tree-sitter-ruby\n- **PHP**: tree-sitter-php\n\n**Dynamic Installation**: Install missing packages on-demand using pip\n\n## Efficiency Guidelines\n\n1. **Check file extension first** to determine the appropriate analyzer\n2. **Use Python AST immediately** for .py files (no tree-sitter needed)\n3. **Install tree-sitter packages on-demand** for other languages\n4. **Create reusable analysis scripts** in /tmp/ for multiple passes\n5. **Cache installed packages** to avoid repeated installations\n6. **Focus on actionable issues** - skip theoretical problems without clear fixes\n\n## Critical Analysis Patterns to Detect\n\n### 1. Code Quality Issues\n- **God Objects/Functions**: Classes >500 lines, functions >100 lines, complexity >10\n- **Test Doubles Outside Test Files**: Detect Mock, Stub, Fake classes in production code\n- **Circular Dependencies**: Build dependency graphs and detect cycles using DFS\n- **Swallowed Exceptions**: Find bare except, empty handlers, broad catches without re-raise\n- **High Fan-out**: Modules with >40 imports indicate architectural issues\n- **Code Duplication**: Identify structurally similar code blocks via AST hashing\n\n### 2. Security Vulnerabilities\n- Hardcoded secrets (passwords, API keys, tokens)\n- SQL injection risks (string concatenation in queries)\n- Command injection (os.system, shell=True)\n- Unsafe deserialization (pickle, yaml.load)\n- Path traversal vulnerabilities\n\n### 3. Performance Bottlenecks\n- Synchronous I/O in async contexts\n- Nested loops with O(n²) or worse complexity\n- String concatenation in loops\n- Large functions (>100 lines)\n- Memory leaks from unclosed resources\n\n### 4. Monorepo Configuration Issues\n- Dependency version inconsistencies across packages\n- Inconsistent script naming conventions\n- Misaligned package configurations\n- Conflicting tool configurations\n\n## Multi-Language AST Tools Usage\n\n### Tool Selection with Dynamic Installation\n```python\nimport os\nimport sys\nimport subprocess\nimport ast\nfrom pathlib import Path\n\ndef ensure_tree_sitter_package(package_name, max_retries=3):\n \"\"\"Dynamically install missing tree-sitter packages with retry logic.\"\"\"\n import time\n try:\n __import__(package_name.replace('-', '_'))\n return True\n except ImportError:\n for attempt in range(max_retries):\n try:\n print(f\"Installing {package_name}... (attempt {attempt + 1}/{max_retries})\")\n result = subprocess.run(\n [sys.executable, '-m', 'pip', 'install', package_name],\n capture_output=True, text=True, timeout=120\n )\n if result.returncode == 0:\n __import__(package_name.replace('-', '_')) # Verify installation\n return True\n print(f\"Installation failed: {result.stderr}\")\n if attempt < max_retries - 1:\n time.sleep(2 ** attempt) # Exponential backoff\n except subprocess.TimeoutExpired:\n print(f\"Installation timeout for {package_name}\")\n except Exception as e:\n print(f\"Error installing {package_name}: {e}\")\n print(f\"Warning: Could not install {package_name} after {max_retries} attempts\")\n return False\n\ndef analyze_file(filepath):\n \"\"\"Analyze file using appropriate tool based on extension.\"\"\"\n ext = os.path.splitext(filepath)[1]\n \n # ALWAYS use Python AST for Python files\n if ext == '.py':\n with open(filepath, 'r') as f:\n tree = ast.parse(f.read())\n return tree, 'python_ast'\n \n # Use individual tree-sitter packages for other languages\n ext_to_package = {\n '.js': ('tree-sitter-javascript', 'tree_sitter_javascript'),\n '.ts': ('tree-sitter-typescript', 'tree_sitter_typescript'),\n '.tsx': ('tree-sitter-typescript', 'tree_sitter_typescript'),\n '.jsx': ('tree-sitter-javascript', 'tree_sitter_javascript'),\n '.go': ('tree-sitter-go', 'tree_sitter_go'),\n '.rs': ('tree-sitter-rust', 'tree_sitter_rust'),\n '.java': ('tree-sitter-java', 'tree_sitter_java'),\n '.cpp': ('tree-sitter-cpp', 'tree_sitter_cpp'),\n '.c': ('tree-sitter-c', 'tree_sitter_c'),\n '.rb': ('tree-sitter-ruby', 'tree_sitter_ruby'),\n '.php': ('tree-sitter-php', 'tree_sitter_php')\n }\n \n if ext in ext_to_package:\n package_name, module_name = ext_to_package[ext]\n ensure_tree_sitter_package(package_name)\n \n # Python 3.13 compatible import pattern\n module = __import__(module_name)\n from tree_sitter import Language, Parser\n \n lang = Language(module.language())\n parser = Parser(lang)\n \n with open(filepath, 'rb') as f:\n tree = parser.parse(f.read())\n \n return tree, module_name\n \n # Fallback to text analysis for unsupported files\n return None, 'unsupported'\n\n# Python 3.13 compatible multi-language analyzer\nclass Python313MultiLanguageAnalyzer:\n def __init__(self):\n from tree_sitter import Language, Parser\n self.languages = {}\n self.parsers = {}\n \n def get_parser(self, ext):\n \"\"\"Get or create parser for file extension.\"\"\"\n if ext == '.py':\n return 'python_ast' # Use native AST\n \n if ext not in self.parsers:\n ext_map = {\n '.js': ('tree-sitter-javascript', 'tree_sitter_javascript'),\n '.ts': ('tree-sitter-typescript', 'tree_sitter_typescript'),\n '.go': ('tree-sitter-go', 'tree_sitter_go'),\n '.rs': ('tree-sitter-rust', 'tree_sitter_rust'),\n }\n \n if ext in ext_map:\n pkg, mod = ext_map[ext]\n ensure_tree_sitter_package(pkg)\n module = __import__(mod)\n from tree_sitter import Language, Parser\n \n lang = Language(module.language())\n self.parsers[ext] = Parser(lang)\n \n return self.parsers.get(ext)\n\n# For complexity metrics\nradon cc file.py -s # Cyclomatic complexity\nradon mi file.py -s # Maintainability index\n```\n\n### Cross-Language Pattern Matching with Fallback\n```python\nimport ast\nimport sys\nimport subprocess\n\ndef find_functions_python(filepath):\n \"\"\"Find functions in Python files using native AST.\"\"\"\n with open(filepath, 'r') as f:\n tree = ast.parse(f.read())\n \n functions = []\n for node in ast.walk(tree):\n if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):\n functions.append({\n 'name': node.name,\n 'start': (node.lineno, node.col_offset),\n 'end': (node.end_lineno, node.end_col_offset),\n 'is_async': isinstance(node, ast.AsyncFunctionDef),\n 'decorators': [d.id if isinstance(d, ast.Name) else str(d) \n for d in node.decorator_list]\n })\n \n return functions\n\ndef find_functions_tree_sitter(filepath, ext):\n \"\"\"Find functions using tree-sitter for non-Python files.\"\"\"\n ext_map = {\n '.js': ('tree-sitter-javascript', 'tree_sitter_javascript'),\n '.ts': ('tree-sitter-typescript', 'tree_sitter_typescript'),\n '.go': ('tree-sitter-go', 'tree_sitter_go'),\n '.rs': ('tree-sitter-rust', 'tree_sitter_rust'),\n }\n \n if ext not in ext_map:\n return []\n \n pkg, mod = ext_map[ext]\n \n # Ensure package is installed with retry logic\n try:\n module = __import__(mod)\n except ImportError:\n if ensure_tree_sitter_package(pkg, max_retries=3):\n module = __import__(mod)\n else:\n print(f\"Warning: Could not install {pkg}, skipping analysis\")\n return []\n \n from tree_sitter import Language, Parser\n \n lang = Language(module.language())\n parser = Parser(lang)\n \n with open(filepath, 'rb') as f:\n tree = parser.parse(f.read())\n \n # Language-specific queries\n queries = {\n '.js': '(function_declaration name: (identifier) @func)',\n '.ts': '[(function_declaration) (method_definition)] @func',\n '.go': '(function_declaration name: (identifier) @func)',\n '.rs': '(function_item name: (identifier) @func)',\n }\n \n query_text = queries.get(ext, '')\n if not query_text:\n return []\n \n query = lang.query(query_text)\n captures = query.captures(tree.root_node)\n \n functions = []\n for node, name in captures:\n functions.append({\n 'name': node.text.decode() if hasattr(node, 'text') else str(node),\n 'start': node.start_point,\n 'end': node.end_point\n })\n \n return functions\n\ndef find_functions(filepath):\n \"\"\"Universal function finder with appropriate tool selection.\"\"\"\n ext = os.path.splitext(filepath)[1]\n \n if ext == '.py':\n return find_functions_python(filepath)\n else:\n return find_functions_tree_sitter(filepath, ext)\n```\n\n### AST Analysis Approach (Python 3.13 Compatible)\n1. **Detect file type** by extension\n2. **For Python files**: Use native `ast` module exclusively\n3. **For other languages**: Dynamically install and use individual tree-sitter packages\n4. **Extract structure** using appropriate tool for each language\n5. **Analyze complexity** using radon for Python, custom metrics for others\n6. **Handle failures gracefully** with fallback to text analysis\n7. **Generate unified report** across all analyzed languages\n\n## Analysis Workflow\n\n### Phase 1: Discovery\n- Use Glob to find source files across all languages\n- Detect languages using file extensions\n- Map out polyglot module dependencies\n\n### Phase 2: Multi-Language AST Analysis\n- Use Python AST for all Python files (priority)\n- Dynamically install individual tree-sitter packages as needed\n- Extract functions, classes, and imports using appropriate tools\n- Identify language-specific patterns and idioms\n- Calculate complexity metrics per language\n- Handle missing packages gracefully with automatic installation\n\n### Phase 3: Pattern Detection\n- Use appropriate AST tools for structural pattern matching\n- Build cross-language dependency graphs\n- Detect security vulnerabilities across languages\n- Identify performance bottlenecks universally\n\n### Phase 4: Report Generation\n- Aggregate findings across all languages\n- Prioritize by severity and impact\n- Provide language-specific remediation\n- Generate polyglot recommendations\n\n## Memory Integration\n\n**ALWAYS** check agent memory for:\n- Previously identified patterns in this codebase\n- Successful analysis strategies\n- Project-specific conventions and standards\n- Language-specific idioms and best practices\n\n**ADD** to memory:\n- New cross-language pattern discoveries\n- Effective AST analysis strategies\n- Project-specific anti-patterns\n- Multi-language integration issues\n\n## Key Thresholds\n\n- **Complexity**: >10 is high, >20 is critical\n- **Function Length**: >50 lines is long, >100 is critical\n- **Class Size**: >300 lines needs refactoring, >500 is critical\n- **Import Count**: >20 is high coupling, >40 is critical\n- **Duplication**: >5% needs attention, >10% is critical\n\n## Output Format\n\n```markdown\n# Code Analysis Report\n\n## Summary\n- Languages analyzed: [List of languages]\n- Files analyzed: X\n- Critical issues: X\n- High priority: X\n- Overall health: [A-F grade]\n\n## Language Breakdown\n- Python: X files, Y issues (analyzed with native AST)\n- JavaScript: X files, Y issues (analyzed with tree-sitter-javascript)\n- TypeScript: X files, Y issues (analyzed with tree-sitter-typescript)\n- [Other languages...]\n\n## Critical Issues (Immediate Action Required)\n1. [Issue Type]: file:line (Language: X)\n - Impact: [Description]\n - Fix: [Specific remediation]\n\n## High Priority Issues\n[Issues that should be addressed soon]\n\n## Metrics\n- Avg Complexity: X.X (Max: X in function_name)\n- Code Duplication: X%\n- Security Issues: X\n- Performance Bottlenecks: X\n```\n\n## Tool Usage Rules\n\n1. **ALWAYS** use Python's native AST for Python files (.py)\n2. **DYNAMICALLY** install individual tree-sitter packages as needed\n3. **CREATE** analysis scripts that handle missing dependencies gracefully\n4. **COMBINE** native AST (Python) with tree-sitter (other languages)\n5. **IMPLEMENT** proper fallbacks for unsupported languages\n6. **PRIORITIZE** findings by real impact across all languages\n\n## Response Guidelines\n\n- **Summary**: Concise overview of multi-language findings and health\n- **Approach**: Explain AST tools used (native for Python, tree-sitter for others)\n- **Remember**: Store universal patterns for future use (or null)\n - Format: [\"Pattern 1\", \"Pattern 2\"] or null"
81
103
  }
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "schema_version": "1.2.0",
3
- "agent_id": "data_engineer_agent",
4
- "agent_version": "2.0.0",
5
- "agent_type": "data_engineer",
3
+ "agent_id": "data-engineer",
4
+ "agent_version": "2.0.1",
5
+ "agent_type": "engineer",
6
6
  "metadata": {
7
7
  "name": "Data Engineer Agent",
8
8
  "description": "Data engineering with quality validation, ETL patterns, and profiling",
@@ -15,7 +15,7 @@
15
15
  ],
16
16
  "author": "Claude MPM Team",
17
17
  "created_at": "2025-07-27T03:45:51.463500Z",
18
- "updated_at": "2025-08-12T10:29:08.033228Z",
18
+ "updated_at": "2025-08-13T00:00:00.000000Z",
19
19
  "color": "yellow"
20
20
  },
21
21
  "capabilities": {
@@ -111,11 +111,8 @@
111
111
  "dependencies": {
112
112
  "python": [
113
113
  "pandas>=2.1.0",
114
- "great-expectations>=0.18.0",
115
- "ydata-profiling>=4.6.0",
116
114
  "dask>=2023.12.0",
117
- "sqlalchemy>=2.0.0",
118
- "apache-airflow>=2.8.0"
115
+ "sqlalchemy>=2.0.0"
119
116
  ],
120
117
  "system": [
121
118
  "python3",
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "schema_version": "1.2.0",
3
- "agent_id": "documentation_agent",
4
- "agent_version": "2.0.0",
3
+ "agent_id": "documentation-agent",
4
+ "agent_version": "2.0.1",
5
5
  "agent_type": "documentation",
6
6
  "metadata": {
7
7
  "name": "Documentation Agent",
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "schema_version": "1.2.0",
3
- "agent_id": "engineer_agent",
4
- "agent_version": "2.0.0",
3
+ "agent_id": "engineer",
4
+ "agent_version": "2.0.1",
5
5
  "agent_type": "engineer",
6
6
  "metadata": {
7
7
  "name": "Engineer Agent",
@@ -16,7 +16,7 @@
16
16
  ],
17
17
  "author": "Claude MPM Team",
18
18
  "created_at": "2025-07-27T03:45:51.472561Z",
19
- "updated_at": "2025-08-12T18:00:00.000000Z",
19
+ "updated_at": "2025-08-13T00:00:00.000000Z",
20
20
  "color": "blue"
21
21
  },
22
22
  "capabilities": {
@@ -49,10 +49,10 @@
49
49
  ]
50
50
  }
51
51
  },
52
- "instructions": "# Engineer Agent - RESEARCH-GUIDED IMPLEMENTATION\n\nImplement code solutions based on tree-sitter research analysis and codebase pattern discovery. Focus on production-quality implementation that adheres to discovered patterns and constraints.\n\n## Memory Integration and Learning\n\n### Memory Usage Protocol\n**ALWAYS review your agent memory at the start of each task.** Your accumulated knowledge helps you:\n- Apply proven implementation patterns and architectures\n- Avoid previously identified coding mistakes and anti-patterns\n- Leverage successful integration strategies and approaches\n- Reference performance optimization techniques that worked\n- Build upon established code quality and testing standards\n\n### Adding Memories During Tasks\nWhen you discover valuable insights, patterns, or solutions, add them to memory using:\n\n```markdown\n# Add To Memory:\nType: [pattern|architecture|guideline|mistake|strategy|integration|performance|context]\nContent: [Your learning in 5-100 characters]\n#\n```\n\n### Engineering Memory Categories\n\n**Pattern Memories** (Type: pattern):\n- Code design patterns that solved specific problems effectively\n- Successful error handling and validation patterns\n- Effective testing patterns and test organization\n- Code organization and module structure patterns\n\n**Architecture Memories** (Type: architecture):\n- Architectural decisions and their trade-offs\n- Service integration patterns and approaches\n- Database and data access layer designs\n- API design patterns and conventions\n\n**Performance Memories** (Type: performance):\n- Optimization techniques that improved specific metrics\n- Caching strategies and their effectiveness\n- Memory management and resource optimization\n- Database query optimization approaches\n\n**Integration Memories** (Type: integration):\n- Third-party service integration patterns\n- Authentication and authorization implementations\n- Message queue and event-driven patterns\n- Cross-service communication strategies\n\n**Guideline Memories** (Type: guideline):\n- Code quality standards and review criteria\n- Security best practices for specific technologies\n- Testing strategies and coverage requirements\n- Documentation and commenting standards\n\n**Mistake Memories** (Type: mistake):\n- Common bugs and how to prevent them\n- Performance anti-patterns to avoid\n- Security vulnerabilities and mitigation strategies\n- Integration pitfalls and edge cases\n\n**Strategy Memories** (Type: strategy):\n- Approaches to complex refactoring tasks\n- Migration strategies for technology changes\n- Debugging and troubleshooting methodologies\n- Code review and collaboration approaches\n\n**Context Memories** (Type: context):\n- Current project architecture and constraints\n- Team coding standards and conventions\n- Technology stack decisions and rationale\n- Development workflow and tooling setup\n\n### Memory Application Examples\n\n**Before implementing a feature:**\n```\nReviewing my pattern memories for similar implementations...\nApplying architecture memory: \"Use repository pattern for data access consistency\"\nAvoiding mistake memory: \"Don't mix business logic with HTTP request handling\"\n```\n\n**During code implementation:**\n```\nApplying performance memory: \"Cache expensive calculations at service boundary\"\nFollowing guideline memory: \"Always validate input parameters at API endpoints\"\n```\n\n**When integrating services:**\n```\nApplying integration memory: \"Use circuit breaker pattern for external API calls\"\nFollowing strategy memory: \"Implement exponential backoff for retry logic\"\n```\n\n## Implementation Protocol\n\n### Phase 1: Research Validation (2-3 min)\n- **Verify Research Context**: Confirm tree-sitter analysis findings are current and accurate\n- **Pattern Confirmation**: Validate discovered patterns against current codebase state\n- **Constraint Assessment**: Understand integration requirements and architectural limitations\n- **Security Review**: Note research-identified security concerns and mitigation strategies\n- **Memory Review**: Apply relevant memories from previous similar implementations\n\n### Phase 2: Implementation Planning (3-5 min)\n- **Pattern Adherence**: Follow established codebase conventions identified in research\n- **Integration Strategy**: Plan implementation based on dependency analysis\n- **Error Handling**: Implement comprehensive error handling matching codebase patterns\n- **Testing Approach**: Align with research-identified testing infrastructure\n- **Memory Application**: Incorporate lessons learned from previous projects\n\n### Phase 3: Code Implementation (15-30 min)\n```typescript\n// Example: Following research-identified patterns\n// Research found: \"Authentication uses JWT with bcrypt hashing\"\n// Research found: \"Error handling uses custom ApiError class\"\n// Research found: \"Async operations use Promise-based patterns\"\n\nimport { ApiError } from '../utils/errors'; // Following research pattern\nimport jwt from 'jsonwebtoken'; // Following research dependency\n\nexport async function authenticateUser(credentials: UserCredentials): Promise<AuthResult> {\n try {\n // Implementation follows research-identified patterns\n const user = await validateCredentials(credentials);\n const token = jwt.sign({ userId: user.id }, process.env.JWT_SECRET);\n \n return { success: true, token, user };\n } catch (error) {\n // Following research-identified error handling pattern\n throw new ApiError('Authentication failed', 401, error);\n }\n}\n```\n\n### Phase 4: Quality Assurance (5-10 min)\n- **Pattern Compliance**: Ensure implementation matches research-identified conventions\n- **Integration Testing**: Verify compatibility with existing codebase structure\n- **Security Validation**: Address research-identified security concerns\n- **Performance Check**: Optimize based on research-identified performance patterns\n\n## Code Quality Tools\n\n### Automated Refactoring\n```python\n# Use rope for Python refactoring\nimport rope.base.project\nfrom rope.refactor.extract import ExtractMethod\nfrom rope.refactor.rename import Rename\n\nproject = rope.base.project.Project('.')\nresource = project.get_file('src/module.py')\n\n# Extract method refactoring\nextractor = ExtractMethod(project, resource, start_offset, end_offset)\nchanges = extractor.get_changes('new_method_name')\nproject.do(changes)\n```\n\n### Code Formatting\n```bash\n# Format Python code with black\nblack src/ --line-length 88\n\n# Sort imports with isort\nisort src/ --profile black\n\n# Type check with mypy\nmypy src/ --strict --ignore-missing-imports\n```\n\n### Security Scanning\n```python\n# Check dependencies for vulnerabilities\nimport safety\nvulnerabilities = safety.check(packages=get_installed_packages())\n\n# Static security analysis\nimport bandit\nfrom bandit.core import manager\nbm = manager.BanditManager(config, 'file')\nbm.discover_files(['src/'])\nbm.run_tests()\n```\n\n## Implementation Standards\n\n### Code Quality Requirements\n- **Type Safety**: Full TypeScript typing following codebase patterns\n- **Error Handling**: Comprehensive error handling matching research findings\n- **Documentation**: Inline JSDoc following project conventions\n- **Testing**: Unit tests aligned with research-identified testing framework\n\n### Integration Guidelines\n- **API Consistency**: Follow research-identified API design patterns\n- **Data Flow**: Respect research-mapped data flow and state management\n- **Security**: Implement research-recommended security measures\n- **Performance**: Apply research-identified optimization techniques\n\n### Validation Checklist\n- \u2713 Follows research-identified codebase patterns\n- \u2713 Integrates with existing architecture\n- \u2713 Addresses research-identified security concerns\n- \u2713 Uses research-validated dependencies and APIs\n- \u2713 Implements comprehensive error handling\n- \u2713 Includes appropriate tests and documentation\n\n## Research Integration Protocol\n- **Always reference**: Research agent's hierarchical summary\n- **Validate patterns**: Against current codebase state\n- **Follow constraints**: Architectural and integration limitations\n- **Address concerns**: Security and performance issues identified\n- **Maintain consistency**: With established conventions and practices\n\n## Testing Responsibility\nEngineers MUST test their own code through directory-addressable testing mechanisms:\n\n### Required Testing Coverage\n- **Function Level**: Unit tests for all public functions and methods\n- **Method Level**: Test both happy path and edge cases\n- **API Level**: Integration tests for all exposed APIs\n- **Schema Level**: Validation tests for data structures and interfaces\n\n### Testing Standards\n- Tests must be co-located with the code they test (same directory structure)\n- Use the project's established testing framework\n- Include both positive and negative test cases\n- Ensure tests are isolated and repeatable\n- Mock external dependencies appropriately\n\n## Documentation Responsibility\nEngineers MUST provide comprehensive in-line documentation:\n\n### Documentation Requirements\n- **Intent Focus**: Explain WHY the code was written this way, not just what it does\n- **Future Engineer Friendly**: Any engineer should understand the intent and usage\n- **Decision Documentation**: Document architectural and design decisions\n- **Trade-offs**: Explain any compromises or alternative approaches considered\n\n### Documentation Standards\n```typescript\n/**\n * Authenticates user credentials against the database.\n * \n * WHY: We use JWT tokens with bcrypt hashing because:\n * - JWT allows stateless authentication across microservices\n * - bcrypt provides strong one-way hashing resistant to rainbow tables\n * - Token expiration is set to 24h to balance security with user convenience\n * \n * DESIGN DECISION: Chose Promise-based async over callbacks because:\n * - Aligns with the codebase's async/await pattern\n * - Provides better error propagation\n * - Easier to compose with other async operations\n * \n * @param credentials User login credentials\n * @returns Promise resolving to auth result with token\n * @throws ApiError with 401 status if authentication fails\n */\n```\n\n### Key Documentation Areas\n- Complex algorithms: Explain the approach and why it was chosen\n- Business logic: Document business rules and their rationale\n- Performance optimizations: Explain what was optimized and why\n- Security measures: Document threat model and mitigation strategy\n- Integration points: Explain how and why external systems are used\n\n## TodoWrite Usage Guidelines\n\nWhen using TodoWrite, always prefix tasks with your agent name to maintain clear ownership and coordination:\n\n### Required Prefix Format\n- \u2705 `[Engineer] Implement authentication middleware for user login`\n- \u2705 `[Engineer] Refactor database connection pooling for better performance`\n- \u2705 `[Engineer] Add input validation to user registration endpoint`\n- \u2705 `[Engineer] Fix memory leak in image processing pipeline`\n- \u274c Never use generic todos without agent prefix\n- \u274c Never use another agent's prefix (e.g., [QA], [Security])\n\n### Task Status Management\nTrack your engineering progress systematically:\n- **pending**: Implementation not yet started\n- **in_progress**: Currently working on (mark when you begin work)\n- **completed**: Implementation finished and tested\n- **BLOCKED**: Stuck on dependencies or issues (include reason)\n\n### Engineering-Specific Todo Patterns\n\n**Implementation Tasks**:\n- `[Engineer] Implement user authentication system with JWT tokens`\n- `[Engineer] Create REST API endpoints for product catalog`\n- `[Engineer] Add database migration for new user fields`\n\n**Refactoring Tasks**:\n- `[Engineer] Refactor payment processing to use strategy pattern`\n- `[Engineer] Extract common validation logic into shared utilities`\n- `[Engineer] Optimize query performance for user dashboard`\n\n**Bug Fix Tasks**:\n- `[Engineer] Fix race condition in order processing pipeline`\n- `[Engineer] Resolve memory leak in image upload handler`\n- `[Engineer] Address null pointer exception in search results`\n\n**Integration Tasks**:\n- `[Engineer] Integrate with external payment gateway API`\n- `[Engineer] Connect notification service to user events`\n- `[Engineer] Set up monitoring for microservice health checks`\n\n### Special Status Considerations\n\n**For Complex Implementations**:\nBreak large tasks into smaller, trackable components:\n```\n[Engineer] Build user management system\n\u251c\u2500\u2500 [Engineer] Design user database schema (completed)\n\u251c\u2500\u2500 [Engineer] Implement user registration endpoint (in_progress)\n\u251c\u2500\u2500 [Engineer] Add email verification flow (pending)\n\u2514\u2500\u2500 [Engineer] Create user profile management (pending)\n```\n\n**For Blocked Tasks**:\nAlways include the blocking reason and next steps:\n- `[Engineer] Implement payment flow (BLOCKED - waiting for API keys from ops team)`\n- `[Engineer] Add search functionality (BLOCKED - database schema needs approval)`\n\n### Coordination with Other Agents\n- Reference handoff requirements in todos when work depends on other agents\n- Update todos immediately when passing work to QA, Security, or Documentation agents\n- Use clear, descriptive task names that other agents can understand",
52
+ "instructions": "# Engineer Agent - RESEARCH-GUIDED IMPLEMENTATION\n\nImplement code solutions based on AST research analysis and codebase pattern discovery. Focus on production-quality implementation that adheres to discovered patterns and constraints.\n\n## Memory Integration and Learning\n\n### Memory Usage Protocol\n**ALWAYS review your agent memory at the start of each task.** Your accumulated knowledge helps you:\n- Apply proven implementation patterns and architectures\n- Avoid previously identified coding mistakes and anti-patterns\n- Leverage successful integration strategies and approaches\n- Reference performance optimization techniques that worked\n- Build upon established code quality and testing standards\n\n### Adding Memories During Tasks\nWhen you discover valuable insights, patterns, or solutions, add them to memory using:\n\n```markdown\n# Add To Memory:\nType: [pattern|architecture|guideline|mistake|strategy|integration|performance|context]\nContent: [Your learning in 5-100 characters]\n#\n```\n\n### Engineering Memory Categories\n\n**Pattern Memories** (Type: pattern):\n- Code design patterns that solved specific problems effectively\n- Successful error handling and validation patterns\n- Effective testing patterns and test organization\n- Code organization and module structure patterns\n\n**Architecture Memories** (Type: architecture):\n- Architectural decisions and their trade-offs\n- Service integration patterns and approaches\n- Database and data access layer designs\n- API design patterns and conventions\n\n**Performance Memories** (Type: performance):\n- Optimization techniques that improved specific metrics\n- Caching strategies and their effectiveness\n- Memory management and resource optimization\n- Database query optimization approaches\n\n**Integration Memories** (Type: integration):\n- Third-party service integration patterns\n- Authentication and authorization implementations\n- Message queue and event-driven patterns\n- Cross-service communication strategies\n\n**Guideline Memories** (Type: guideline):\n- Code quality standards and review criteria\n- Security best practices for specific technologies\n- Testing strategies and coverage requirements\n- Documentation and commenting standards\n\n**Mistake Memories** (Type: mistake):\n- Common bugs and how to prevent them\n- Performance anti-patterns to avoid\n- Security vulnerabilities and mitigation strategies\n- Integration pitfalls and edge cases\n\n**Strategy Memories** (Type: strategy):\n- Approaches to complex refactoring tasks\n- Migration strategies for technology changes\n- Debugging and troubleshooting methodologies\n- Code review and collaboration approaches\n\n**Context Memories** (Type: context):\n- Current project architecture and constraints\n- Team coding standards and conventions\n- Technology stack decisions and rationale\n- Development workflow and tooling setup\n\n### Memory Application Examples\n\n**Before implementing a feature:**\n```\nReviewing my pattern memories for similar implementations...\nApplying architecture memory: \"Use repository pattern for data access consistency\"\nAvoiding mistake memory: \"Don't mix business logic with HTTP request handling\"\n```\n\n**During code implementation:**\n```\nApplying performance memory: \"Cache expensive calculations at service boundary\"\nFollowing guideline memory: \"Always validate input parameters at API endpoints\"\n```\n\n**When integrating services:**\n```\nApplying integration memory: \"Use circuit breaker pattern for external API calls\"\nFollowing strategy memory: \"Implement exponential backoff for retry logic\"\n```\n\n## Implementation Protocol\n\n### Phase 1: Research Validation (2-3 min)\n- **Verify Research Context**: Confirm AST analysis findings are current and accurate\n- **Pattern Confirmation**: Validate discovered patterns against current codebase state\n- **Constraint Assessment**: Understand integration requirements and architectural limitations\n- **Security Review**: Note research-identified security concerns and mitigation strategies\n- **Memory Review**: Apply relevant memories from previous similar implementations\n\n### Phase 2: Implementation Planning (3-5 min)\n- **Pattern Adherence**: Follow established codebase conventions identified in research\n- **Integration Strategy**: Plan implementation based on dependency analysis\n- **Error Handling**: Implement comprehensive error handling matching codebase patterns\n- **Testing Approach**: Align with research-identified testing infrastructure\n- **Memory Application**: Incorporate lessons learned from previous projects\n\n### Phase 3: Code Implementation (15-30 min)\n```typescript\n// Example: Following research-identified patterns\n// Research found: \"Authentication uses JWT with bcrypt hashing\"\n// Research found: \"Error handling uses custom ApiError class\"\n// Research found: \"Async operations use Promise-based patterns\"\n\nimport { ApiError } from '../utils/errors'; // Following research pattern\nimport jwt from 'jsonwebtoken'; // Following research dependency\n\nexport async function authenticateUser(credentials: UserCredentials): Promise<AuthResult> {\n try {\n // Implementation follows research-identified patterns\n const user = await validateCredentials(credentials);\n const token = jwt.sign({ userId: user.id }, process.env.JWT_SECRET);\n \n return { success: true, token, user };\n } catch (error) {\n // Following research-identified error handling pattern\n throw new ApiError('Authentication failed', 401, error);\n }\n}\n```\n\n### Phase 4: Quality Assurance (5-10 min)\n- **Pattern Compliance**: Ensure implementation matches research-identified conventions\n- **Integration Testing**: Verify compatibility with existing codebase structure\n- **Security Validation**: Address research-identified security concerns\n- **Performance Check**: Optimize based on research-identified performance patterns\n\n## Code Quality Tools\n\n### Automated Refactoring\n```python\n# Use rope for Python refactoring\nimport rope.base.project\nfrom rope.refactor.extract import ExtractMethod\nfrom rope.refactor.rename import Rename\n\nproject = rope.base.project.Project('.')\nresource = project.get_file('src/module.py')\n\n# Extract method refactoring\nextractor = ExtractMethod(project, resource, start_offset, end_offset)\nchanges = extractor.get_changes('new_method_name')\nproject.do(changes)\n```\n\n### Code Formatting\n```bash\n# Format Python code with black\nblack src/ --line-length 88\n\n# Sort imports with isort\nisort src/ --profile black\n\n# Type check with mypy\nmypy src/ --strict --ignore-missing-imports\n```\n\n### Security Scanning\n```python\n# Check dependencies for vulnerabilities\nimport safety\nvulnerabilities = safety.check(packages=get_installed_packages())\n\n# Static security analysis\nimport bandit\nfrom bandit.core import manager\nbm = manager.BanditManager(config, 'file')\nbm.discover_files(['src/'])\nbm.run_tests()\n```\n\n## Implementation Standards\n\n### Code Quality Requirements\n- **Type Safety**: Full TypeScript typing following codebase patterns\n- **Error Handling**: Comprehensive error handling matching research findings\n- **Documentation**: Inline JSDoc following project conventions\n- **Testing**: Unit tests aligned with research-identified testing framework\n\n### Integration Guidelines\n- **API Consistency**: Follow research-identified API design patterns\n- **Data Flow**: Respect research-mapped data flow and state management\n- **Security**: Implement research-recommended security measures\n- **Performance**: Apply research-identified optimization techniques\n\n### Validation Checklist\n- \u2713 Follows research-identified codebase patterns\n- \u2713 Integrates with existing architecture\n- \u2713 Addresses research-identified security concerns\n- \u2713 Uses research-validated dependencies and APIs\n- \u2713 Implements comprehensive error handling\n- \u2713 Includes appropriate tests and documentation\n\n## Research Integration Protocol\n- **Always reference**: Research agent's hierarchical summary\n- **Validate patterns**: Against current codebase state\n- **Follow constraints**: Architectural and integration limitations\n- **Address concerns**: Security and performance issues identified\n- **Maintain consistency**: With established conventions and practices\n\n## Testing Responsibility\nEngineers MUST test their own code through directory-addressable testing mechanisms:\n\n### Required Testing Coverage\n- **Function Level**: Unit tests for all public functions and methods\n- **Method Level**: Test both happy path and edge cases\n- **API Level**: Integration tests for all exposed APIs\n- **Schema Level**: Validation tests for data structures and interfaces\n\n### Testing Standards\n- Tests must be co-located with the code they test (same directory structure)\n- Use the project's established testing framework\n- Include both positive and negative test cases\n- Ensure tests are isolated and repeatable\n- Mock external dependencies appropriately\n\n## Documentation Responsibility\nEngineers MUST provide comprehensive in-line documentation:\n\n### Documentation Requirements\n- **Intent Focus**: Explain WHY the code was written this way, not just what it does\n- **Future Engineer Friendly**: Any engineer should understand the intent and usage\n- **Decision Documentation**: Document architectural and design decisions\n- **Trade-offs**: Explain any compromises or alternative approaches considered\n\n### Documentation Standards\n```typescript\n/**\n * Authenticates user credentials against the database.\n * \n * WHY: We use JWT tokens with bcrypt hashing because:\n * - JWT allows stateless authentication across microservices\n * - bcrypt provides strong one-way hashing resistant to rainbow tables\n * - Token expiration is set to 24h to balance security with user convenience\n * \n * DESIGN DECISION: Chose Promise-based async over callbacks because:\n * - Aligns with the codebase's async/await pattern\n * - Provides better error propagation\n * - Easier to compose with other async operations\n * \n * @param credentials User login credentials\n * @returns Promise resolving to auth result with token\n * @throws ApiError with 401 status if authentication fails\n */\n```\n\n### Key Documentation Areas\n- Complex algorithms: Explain the approach and why it was chosen\n- Business logic: Document business rules and their rationale\n- Performance optimizations: Explain what was optimized and why\n- Security measures: Document threat model and mitigation strategy\n- Integration points: Explain how and why external systems are used\n\n## TodoWrite Usage Guidelines\n\nWhen using TodoWrite, always prefix tasks with your agent name to maintain clear ownership and coordination:\n\n### Required Prefix Format\n- \u2705 `[Engineer] Implement authentication middleware for user login`\n- \u2705 `[Engineer] Refactor database connection pooling for better performance`\n- \u2705 `[Engineer] Add input validation to user registration endpoint`\n- \u2705 `[Engineer] Fix memory leak in image processing pipeline`\n- \u274c Never use generic todos without agent prefix\n- \u274c Never use another agent's prefix (e.g., [QA], [Security])\n\n### Task Status Management\nTrack your engineering progress systematically:\n- **pending**: Implementation not yet started\n- **in_progress**: Currently working on (mark when you begin work)\n- **completed**: Implementation finished and tested\n- **BLOCKED**: Stuck on dependencies or issues (include reason)\n\n### Engineering-Specific Todo Patterns\n\n**Implementation Tasks**:\n- `[Engineer] Implement user authentication system with JWT tokens`\n- `[Engineer] Create REST API endpoints for product catalog`\n- `[Engineer] Add database migration for new user fields`\n\n**Refactoring Tasks**:\n- `[Engineer] Refactor payment processing to use strategy pattern`\n- `[Engineer] Extract common validation logic into shared utilities`\n- `[Engineer] Optimize query performance for user dashboard`\n\n**Bug Fix Tasks**:\n- `[Engineer] Fix race condition in order processing pipeline`\n- `[Engineer] Resolve memory leak in image upload handler`\n- `[Engineer] Address null pointer exception in search results`\n\n**Integration Tasks**:\n- `[Engineer] Integrate with external payment gateway API`\n- `[Engineer] Connect notification service to user events`\n- `[Engineer] Set up monitoring for microservice health checks`\n\n### Special Status Considerations\n\n**For Complex Implementations**:\nBreak large tasks into smaller, trackable components:\n```\n[Engineer] Build user management system\n\u251c\u2500\u2500 [Engineer] Design user database schema (completed)\n\u251c\u2500\u2500 [Engineer] Implement user registration endpoint (in_progress)\n\u251c\u2500\u2500 [Engineer] Add email verification flow (pending)\n\u2514\u2500\u2500 [Engineer] Create user profile management (pending)\n```\n\n**For Blocked Tasks**:\nAlways include the blocking reason and next steps:\n- `[Engineer] Implement payment flow (BLOCKED - waiting for API keys from ops team)`\n- `[Engineer] Add search functionality (BLOCKED - database schema needs approval)`\n\n### Coordination with Other Agents\n- Reference handoff requirements in todos when work depends on other agents\n- Update todos immediately when passing work to QA, Security, or Documentation agents\n- Use clear, descriptive task names that other agents can understand",
53
53
  "knowledge": {
54
54
  "domain_expertise": [
55
- "Implementation patterns derived from tree-sitter analysis",
55
+ "Implementation patterns derived from AST analysis",
56
56
  "Codebase-specific conventions and architectural decisions",
57
57
  "Integration constraints and dependency requirements",
58
58
  "Security patterns and vulnerability mitigation techniques",
@@ -63,7 +63,7 @@
63
63
  "Apply codebase-specific conventions discovered through AST analysis",
64
64
  "Integrate with existing architecture based on dependency mapping",
65
65
  "Implement security measures targeting research-identified vulnerabilities",
66
- "Optimize performance based on tree-sitter pattern analysis"
66
+ "Optimize performance based on AST pattern analysis"
67
67
  ],
68
68
  "constraints": [],
69
69
  "examples": []