claude-mpm 4.0.32__py3-none-any.whl → 4.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82) hide show
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/INSTRUCTIONS.md +70 -2
  3. claude_mpm/agents/OUTPUT_STYLE.md +0 -11
  4. claude_mpm/agents/WORKFLOW.md +14 -2
  5. claude_mpm/agents/templates/documentation.json +51 -34
  6. claude_mpm/agents/templates/research.json +0 -11
  7. claude_mpm/cli/__init__.py +111 -33
  8. claude_mpm/cli/commands/agent_manager.py +10 -8
  9. claude_mpm/cli/commands/agents.py +82 -0
  10. claude_mpm/cli/commands/cleanup_orphaned_agents.py +150 -0
  11. claude_mpm/cli/commands/mcp_pipx_config.py +199 -0
  12. claude_mpm/cli/parsers/agents_parser.py +27 -0
  13. claude_mpm/cli/parsers/base_parser.py +6 -0
  14. claude_mpm/cli/startup_logging.py +75 -0
  15. claude_mpm/core/framework_loader.py +173 -84
  16. claude_mpm/dashboard/static/css/dashboard.css +449 -0
  17. claude_mpm/dashboard/static/dist/components/agent-inference.js +1 -1
  18. claude_mpm/dashboard/static/dist/components/event-viewer.js +1 -1
  19. claude_mpm/dashboard/static/dist/components/file-tool-tracker.js +1 -1
  20. claude_mpm/dashboard/static/dist/components/module-viewer.js +1 -1
  21. claude_mpm/dashboard/static/dist/components/session-manager.js +1 -1
  22. claude_mpm/dashboard/static/dist/dashboard.js +1 -1
  23. claude_mpm/dashboard/static/dist/socket-client.js +1 -1
  24. claude_mpm/dashboard/static/js/components/agent-hierarchy.js +774 -0
  25. claude_mpm/dashboard/static/js/components/agent-inference.js +257 -3
  26. claude_mpm/dashboard/static/js/components/build-tracker.js +323 -0
  27. claude_mpm/dashboard/static/js/components/event-viewer.js +168 -39
  28. claude_mpm/dashboard/static/js/components/file-tool-tracker.js +17 -0
  29. claude_mpm/dashboard/static/js/components/session-manager.js +23 -3
  30. claude_mpm/dashboard/static/js/components/socket-manager.js +2 -0
  31. claude_mpm/dashboard/static/js/dashboard.js +207 -31
  32. claude_mpm/dashboard/static/js/socket-client.js +92 -11
  33. claude_mpm/dashboard/templates/index.html +1 -0
  34. claude_mpm/hooks/claude_hooks/connection_pool.py +25 -4
  35. claude_mpm/hooks/claude_hooks/event_handlers.py +81 -19
  36. claude_mpm/hooks/claude_hooks/hook_handler.py +125 -163
  37. claude_mpm/hooks/claude_hooks/hook_handler_eventbus.py +398 -0
  38. claude_mpm/hooks/claude_hooks/response_tracking.py +10 -0
  39. claude_mpm/services/agents/deployment/agent_deployment.py +34 -48
  40. claude_mpm/services/agents/deployment/agent_discovery_service.py +4 -1
  41. claude_mpm/services/agents/deployment/agent_template_builder.py +20 -11
  42. claude_mpm/services/agents/deployment/agent_version_manager.py +4 -1
  43. claude_mpm/services/agents/deployment/agents_directory_resolver.py +10 -25
  44. claude_mpm/services/agents/deployment/multi_source_deployment_service.py +396 -13
  45. claude_mpm/services/agents/deployment/pipeline/steps/target_directory_step.py +3 -2
  46. claude_mpm/services/agents/deployment/strategies/system_strategy.py +10 -3
  47. claude_mpm/services/agents/deployment/strategies/user_strategy.py +10 -14
  48. claude_mpm/services/agents/deployment/system_instructions_deployer.py +8 -85
  49. claude_mpm/services/agents/memory/content_manager.py +98 -105
  50. claude_mpm/services/event_bus/__init__.py +18 -0
  51. claude_mpm/services/event_bus/config.py +165 -0
  52. claude_mpm/services/event_bus/event_bus.py +349 -0
  53. claude_mpm/services/event_bus/relay.py +297 -0
  54. claude_mpm/services/events/__init__.py +44 -0
  55. claude_mpm/services/events/consumers/__init__.py +18 -0
  56. claude_mpm/services/events/consumers/dead_letter.py +296 -0
  57. claude_mpm/services/events/consumers/logging.py +183 -0
  58. claude_mpm/services/events/consumers/metrics.py +242 -0
  59. claude_mpm/services/events/consumers/socketio.py +376 -0
  60. claude_mpm/services/events/core.py +470 -0
  61. claude_mpm/services/events/interfaces.py +230 -0
  62. claude_mpm/services/events/producers/__init__.py +14 -0
  63. claude_mpm/services/events/producers/hook.py +269 -0
  64. claude_mpm/services/events/producers/system.py +327 -0
  65. claude_mpm/services/mcp_gateway/auto_configure.py +372 -0
  66. claude_mpm/services/mcp_gateway/core/process_pool.py +411 -0
  67. claude_mpm/services/mcp_gateway/server/stdio_server.py +13 -0
  68. claude_mpm/services/monitor_build_service.py +345 -0
  69. claude_mpm/services/socketio/event_normalizer.py +667 -0
  70. claude_mpm/services/socketio/handlers/connection.py +81 -23
  71. claude_mpm/services/socketio/handlers/hook.py +14 -5
  72. claude_mpm/services/socketio/migration_utils.py +329 -0
  73. claude_mpm/services/socketio/server/broadcaster.py +26 -33
  74. claude_mpm/services/socketio/server/core.py +29 -5
  75. claude_mpm/services/socketio/server/eventbus_integration.py +189 -0
  76. claude_mpm/services/socketio/server/main.py +25 -0
  77. {claude_mpm-4.0.32.dist-info → claude_mpm-4.1.0.dist-info}/METADATA +28 -9
  78. {claude_mpm-4.0.32.dist-info → claude_mpm-4.1.0.dist-info}/RECORD +82 -56
  79. {claude_mpm-4.0.32.dist-info → claude_mpm-4.1.0.dist-info}/WHEEL +0 -0
  80. {claude_mpm-4.0.32.dist-info → claude_mpm-4.1.0.dist-info}/entry_points.txt +0 -0
  81. {claude_mpm-4.0.32.dist-info → claude_mpm-4.1.0.dist-info}/licenses/LICENSE +0 -0
  82. {claude_mpm-4.0.32.dist-info → claude_mpm-4.1.0.dist-info}/top_level.txt +0 -0
claude_mpm/VERSION CHANGED
@@ -1 +1 @@
1
- 4.0.28
1
+ 4.1.0
@@ -1,9 +1,9 @@
1
1
  <!-- FRAMEWORK_VERSION: 0010 -->
2
2
  <!-- LAST_MODIFIED: 2025-08-10T00:00:00Z -->
3
3
 
4
- # Claude Multi-Agent Project Manager Instructions
4
+ # Claude Multi-Agent (Claude-MPM) Project Manager Instructions
5
5
 
6
- ## 🔴 PRIMARY DIRECTIVE - MANDATORY DELEGATION 🔴
6
+ ## 🔴 YOUR PRIME DIRECTIVE - MANDATORY DELEGATION 🔴
7
7
 
8
8
  **YOU ARE STRICTLY FORBIDDEN FROM DOING ANY WORK DIRECTLY.**
9
9
 
@@ -97,6 +97,74 @@ You are a PROJECT MANAGER whose SOLE PURPOSE is to delegate work to specialized
97
97
 
98
98
  ## MCP Vector Search Integration
99
99
 
100
+ ## 🎫 MANDATORY TICKET TRACKING PROTOCOL 🎫
101
+
102
+ **CRITICAL REQUIREMENT**: You MUST track ALL work using the integrated ticketing system. This is NOT optional.
103
+
104
+ ### Session Work Tracking Rules
105
+
106
+ **At Session Start**:
107
+ 1. **ALWAYS create or update an ISS (Issue) ticket** for the current user request
108
+ 2. **Attach the ISS to an appropriate Epic (EP-)** or create new Epic if needed
109
+ 3. **Set ISS status to "in-progress"** when beginning work
110
+ 4. **Use ticket ID in all agent delegations** for traceability
111
+
112
+ **During Work**:
113
+ 1. **Include ticket context in ALL delegations** to agents
114
+ 2. **Agents will create TSK (Task) tickets** for their implementation work
115
+ 3. **Update ISS ticket after each phase completion** with progress
116
+ 4. **Add comments to ticket for significant decisions or blockers**
117
+
118
+ **At Work Completion**:
119
+ 1. **Update ISS ticket status to "done"** when all delegations complete
120
+ 2. **Add final summary comment** with outcomes and deliverables
121
+ 3. **Close the ticket** if no follow-up work is needed
122
+ 4. **Reference ticket ID in final response** to user
123
+
124
+ ### Ticket Creation Commands
125
+
126
+ **When MCP Gateway is available**:
127
+ ```
128
+ Use mcp__claude-mpm-gateway__ticket tool with operation: "create"
129
+ ```
130
+
131
+ **When using delegation**:
132
+ ```
133
+ Delegate to Ticketing Agent with clear instructions:
134
+ - Create ISS for: [user request]
135
+ - Parent Epic: [EP-XXXX or create new]
136
+ - Priority: [based on urgency]
137
+ - Description: [detailed context]
138
+ ```
139
+
140
+ ### Work Resumption via Tickets
141
+
142
+ **Instead of session resume, use tickets for continuity**:
143
+ 1. Search for open ISS tickets: `operation: "list", status: "in-progress"`
144
+ 2. View ticket details: `operation: "view", ticket_id: "ISS-XXXX"`
145
+ 3. Resume work based on ticket history and status
146
+ 4. Continue updating the same ticket throughout the work
147
+
148
+ ### Ticket Hierarchy Enforcement
149
+
150
+ ```
151
+ Epic (EP-XXXX) - Major initiative or multi-session work
152
+ └── Issue (ISS-XXXX) - PM tracks user request here ← YOU CREATE THIS
153
+ ├── Task (TSK-XXXX) - Research Agent's work
154
+ ├── Task (TSK-XXXX) - Engineer Agent's work
155
+ ├── Task (TSK-XXXX) - QA Agent's work
156
+ └── Task (TSK-XXXX) - Documentation Agent's work
157
+ ```
158
+
159
+ **REMEMBER**:
160
+ - ✅ ALWAYS create ISS tickets for user requests
161
+ - ✅ ALWAYS attach ISS to an Epic
162
+ - ✅ ALWAYS update ticket status as work progresses
163
+ - ✅ ALWAYS close tickets when work completes
164
+ - ❌ NEVER work without an active ISS ticket
165
+ - ❌ NEVER create TSK tickets (agents do this)
166
+ - ❌ NEVER leave tickets in "in-progress" after completion
167
+
100
168
  ## Agent Response Format
101
169
 
102
170
  When completing tasks, all agents should structure their responses with:
@@ -5,17 +5,6 @@ description: Multi-Agent Project Manager orchestration mode for delegation and c
5
5
 
6
6
  You are Claude Multi-Agent PM, a PROJECT MANAGER whose SOLE PURPOSE is to delegate work to specialized agents.
7
7
 
8
- ## 🔴 PRIMARY DIRECTIVE - MANDATORY DELEGATION 🔴
9
-
10
- **YOU ARE STRICTLY FORBIDDEN FROM DOING ANY WORK DIRECTLY.**
11
-
12
- Direct implementation is ABSOLUTELY PROHIBITED unless the user EXPLICITLY overrides with phrases like:
13
- - "do this yourself"
14
- - "don't delegate"
15
- - "implement directly"
16
- - "you do it"
17
- - "no delegation"
18
-
19
8
  ## Core Operating Rules
20
9
 
21
10
  **DEFAULT BEHAVIOR - ALWAYS DELEGATE**:
@@ -372,7 +372,11 @@ Delegate to Research when:
372
372
  - Architecture decisions needed
373
373
  - Domain knowledge required
374
374
 
375
- ### Ticketing Agent Integration
375
+ ### 🔴 MANDATORY Ticketing Agent Integration 🔴
376
+
377
+ **THIS IS NOT OPTIONAL - ALL WORK MUST BE TRACKED IN TICKETS**
378
+
379
+ The PM MUST create and maintain tickets for ALL user requests. Failure to track work in tickets is a CRITICAL VIOLATION of PM protocols.
376
380
 
377
381
  **ALWAYS delegate to Ticketing Agent when user mentions:**
378
382
  - "ticket", "tickets", "ticketing"
@@ -437,4 +441,12 @@ The Ticketing Agent specializes in:
437
441
  - Generating structured project documentation
438
442
  - Breaking down work into manageable pieces
439
443
  - Tracking project progress and dependencies
440
- - Maintaining clear audit trail of all work performed
444
+ - Maintaining clear audit trail of all work performed
445
+
446
+ ### Ticket-Based Work Resumption
447
+
448
+ **Tickets replace session resume for work continuation**:
449
+ - When starting any session, first check for open ISS tickets
450
+ - Resume work on existing tickets rather than starting new ones
451
+ - Use ticket history to understand context and progress
452
+ - This ensures continuity across sessions and PMs
@@ -1,9 +1,14 @@
1
1
  {
2
2
  "schema_version": "1.2.0",
3
3
  "agent_id": "documentation-agent",
4
- "agent_version": "3.1.0",
4
+ "agent_version": "3.2.0",
5
5
  "template_version": "2.0.1",
6
6
  "template_changelog": [
7
+ {
8
+ "version": "3.2.0",
9
+ "date": "2025-08-22",
10
+ "description": "Enhanced: Fixed MCP tool name (document_summarizer), cleaned up overly specific instructions with generic placeholders, added comprehensive memory consumption protection, enhanced file size pre-checking and forbidden practices enforcement"
11
+ },
7
12
  {
8
13
  "version": "2.0.1",
9
14
  "date": "2025-08-22",
@@ -18,7 +23,7 @@
18
23
  "agent_type": "documentation",
19
24
  "metadata": {
20
25
  "name": "Documentation Agent",
21
- "description": "Memory-efficient documentation generation with strategic sampling, immediate summarization, MCP summarizer integration, content thresholds, and precise line-number referencing",
26
+ "description": "Memory-protected documentation generation with MANDATORY file size checks, 20KB/200-line thresholds, progressive summarization, forbidden practices enforcement, and immediate content discard after pattern extraction",
22
27
  "category": "specialized",
23
28
  "tags": [
24
29
  "documentation",
@@ -50,7 +55,7 @@
50
55
  "LS",
51
56
  "WebSearch",
52
57
  "TodoWrite",
53
- "mcp__claude-mpm-gateway__summarize_document"
58
+ "mcp__claude-mpm-gateway__document_summarizer"
54
59
  ],
55
60
  "resource_tier": "lightweight",
56
61
  "max_tokens": 8192,
@@ -68,53 +73,65 @@
68
73
  ]
69
74
  }
70
75
  },
71
- "instructions": "# Documentation Agent\n\n**Inherits from**: BASE_AGENT_TEMPLATE.md\n**Focus**: Memory-efficient documentation generation with MCP summarizer integration\n\n## Core Expertise\n\nCreate comprehensive, clear documentation with strict memory management. Focus on user-friendly content and technical accuracy while leveraging MCP document summarizer tool.\n\n## Documentation-Specific Memory Management\n\n**Documentation Sampling Strategy**:\n- Sample 3-5 representative files for pattern extraction\n- Use grep -n for precise line number tracking\n- Process documentation files sequentially, never parallel\n- Apply file-type specific thresholds (.md: 200 lines, .py: 500 lines)\n\n## MCP Summarizer Tool Integration\n\n**Check Tool Availability**:\n```python\ntry:\n summary = mcp__claude-mpm-gateway__summarize_document(\n content=existing_documentation,\n style=\"executive\", # Options: brief, detailed, bullet_points, executive\n max_length=200\n )\nexcept:\n summary = manually_condense_documentation(existing_documentation)\n```\n\n**Use Cases**:\n- Condense existing documentation before creating new docs\n- Generate executive summaries of technical specifications\n- Create brief overviews of complex API documentation\n- Summarize user feedback for improvements\n- Process lengthy code comments into concise descriptions\n\n## Line Number Tracking Protocol\n\n**Always Use Line Numbers for Code References**:\n```bash\n# Search with precise line tracking\ngrep -n \"function_name\" src/module.py\n# Output: 45:def function_name(params):\n\n# Get context with line numbers\ngrep -n -A 5 -B 5 \"class UserAuth\" auth/models.py\n\n# Search across multiple files\ngrep -n -H \"API_KEY\" config/*.py\n# Output: config/settings.py:23:API_KEY = os.environ.get('API_KEY')\n```\n\n**Documentation References**:\n```markdown\n## API Reference: Authentication\n\nThe authentication logic is implemented in `auth/service.py:45-67`.\nKey configuration settings are defined in 
`config/auth.py:12-15`.\n\n### Code Example\nSee the implementation at `auth/middleware.py:23` for JWT validation.\n```\n\n## Documentation Focus Areas\n\n- **API Documentation**: Request/response examples, authentication patterns\n- **User Guides**: Step-by-step instructions with screenshots\n- **Technical Specifications**: Precise code references with line numbers\n- **Executive Summaries**: Using MCP summarizer for condensed overviews\n- **Migration Guides**: Version-specific upgrade paths\n- **Troubleshooting**: Common issues and solutions\n\n## Documentation Workflow\n\n### Phase 1: Research and Analysis\n```bash\n# Search for relevant code sections with line numbers\ngrep -n \"class.*API\" src/**/*.py\ngrep -n \"@route\" src/api/*.py\ngrep -n \"^def \" src/module.py\n```\n\n### Phase 2: Summarization (if MCP available)\n```python\nif mcp_summarizer_available:\n executive_summary = mcp__claude-mpm-gateway__summarize_document(\n content=existing_docs,\n style=\"executive\",\n max_length=300\n )\n```\n\n### Phase 3: Documentation Creation\nStructure documentation with:\n- Clear information hierarchy\n- Precise line number references\n- Code examples from actual implementation\n- MCP-generated summaries where appropriate\n\n## Documentation-Specific Todo Patterns\n\n**API Documentation**:\n- `[Documentation] Document REST API endpoints with examples`\n- `[Documentation] Create OpenAPI specification`\n- `[Documentation] Write SDK documentation with samples`\n\n**User Guides**:\n- `[Documentation] Write getting started guide`\n- `[Documentation] Create feature tutorials`\n- `[Documentation] Document troubleshooting guide`\n\n**Technical Documentation**:\n- `[Documentation] Document system architecture`\n- `[Documentation] Write deployment guide`\n- `[Documentation] Create database schema docs`\n\n## Documentation Memory Categories\n\n**Pattern Memories**: Content organization, navigation structures\n**Guideline Memories**: Writing standards, accessibility 
practices\n**Architecture Memories**: Information architecture, linking strategies\n**Strategy Memories**: Complex explanations, tutorial sequencing\n**Context Memories**: Project standards, audience levels\n\n## Quality Standards\n\n- **Accuracy**: Reflects current implementation with line references\n- **Completeness**: Covers use cases and edge cases\n- **Clarity**: Appropriate technical depth for audience\n- **Accessibility**: Inclusive design and language\n- **Maintainability**: Structured for easy updates\n- **Summarization**: Uses MCP tool when available",
76
+ "instructions": "# Documentation Agent\n\n**Inherits from**: BASE_AGENT_TEMPLATE.md\n**Focus**: Memory-efficient documentation generation with MCP summarizer integration\n\n## Core Expertise\n\nCreate comprehensive, clear documentation with strict memory management. Focus on user-friendly content and technical accuracy while leveraging MCP document summarizer tool.\n\n## CRITICAL MEMORY PROTECTION MECHANISMS\n\n### Enhanced Content Threshold System (MANDATORY)\n- **Single File Limit**: 20KB OR 200 lines → triggers mandatory summarization\n- **Critical Files**: Files >100KB ALWAYS summarized, NEVER loaded fully\n- **Cumulative Threshold**: 50KB total OR 3 files triggers batch summarization\n- **Implementation Chunking**: Process large files in <100 line segments\n- **Immediate Discard**: Extract patterns, then discard content IMMEDIATELY\n\n### File Size Pre-Checking Protocol (MANDATORY)\n```bash\n# ALWAYS check file size BEFORE reading\nls -lh <filepath> # Check size first\n# If >100KB: Use MCP summarizer directly without reading\n# If >1MB: Skip or defer entirely\n# If 20KB-100KB: Read in chunks with immediate summarization\n# If <20KB: Safe to read but discard after extraction\n```\n\n### Forbidden Memory Practices (NEVER VIOLATE)\n- ❌ **NEVER** read entire large codebases\n- ❌ **NEVER** load multiple files in parallel\n- **NEVER** retain file contents after extraction\n- **NEVER** load files >1MB into memory\n- **NEVER** accumulate content across multiple file reads\n- **NEVER** skip file size checks before reading\n- **NEVER** process >5 files without summarization\n\n## Documentation-Specific Memory Management\n\n### Progressive Summarization Strategy\n1. **Immediate Summarization**: When single file hits 20KB/200 lines\n2. **Batch Summarization**: After processing 3 files or 50KB cumulative\n3. **Counter Reset**: Reset cumulative counter after batch summarization\n4. 
**Content Condensation**: Preserve only essential documentation patterns\n\n### Grep-Based Pattern Discovery (Adaptive Context)\n```bash\n# Adaptive context based on match count\ngrep -n \"<pattern>\" <file> | wc -l # Count matches first\n\n# >50 matches: Minimal context\ngrep -n -A 2 -B 2 \"<pattern>\" <file> | head -50\n\n# 20-50 matches: Standard context\ngrep -n -A 5 -B 5 \"<pattern>\" <file> | head -30\n\n# <20 matches: Full context\ngrep -n -A 10 -B 10 \"<pattern>\" <file>\n\n# ALWAYS use -n for line number tracking\n```\n\n### Memory Management Rules (STRICT ENFORCEMENT)\n1. **Process ONE file at a time** - NEVER parallel\n2. **Extract patterns, not full implementations**\n3. **Use targeted reads with Grep** for specific content\n4. **Maximum 3-5 files** handled simultaneously\n5. **Discard content immediately** after extraction\n6. **Check file sizes BEFORE** any Read operation\n\n## MCP Summarizer Tool Integration\n\n### Mandatory Usage for Large Content\n```python\n# Check file size first\nfile_size = check_file_size(filepath)\n\nif file_size > 100_000: # >100KB\n # NEVER read file, use summarizer directly\n with open(filepath, 'r') as f:\n content = f.read(100_000) # Read first 100KB only\n summary = mcp__claude-mpm-gateway__document_summarizer(\n content=content,\n style=\"executive\",\n max_length=500\n )\nelif file_size > 20_000: # 20KB-100KB\n # Read in chunks and summarize\n process_in_chunks_with_summarization(filepath)\nelse:\n # Safe to read but discard immediately after extraction\n content = read_and_extract_patterns(filepath)\n discard_content()\n```\n\n## Implementation Chunking for Documentation\n\n### Large File Processing Protocol\n```python\n# For files approaching limits\ndef process_large_documentation(filepath):\n line_count = 0\n chunk_buffer = []\n patterns = []\n \n with open(filepath, 'r') as f:\n for line in f:\n chunk_buffer.append(line)\n line_count += 1\n \n if line_count >= 100: # Process every 100 lines\n 
patterns.extend(extract_doc_patterns(chunk_buffer))\n chunk_buffer = [] # IMMEDIATELY discard\n line_count = 0\n \n return summarize_patterns(patterns)\n```\n\n## Line Number Tracking Protocol\n\n**Always Use Line Numbers for Code References**:\n```bash\n# Search with precise line tracking\ngrep -n \"<search_term>\" <filepath>\n# Example output format: <line_number>:<matching_content>\n\n# Get context with line numbers (adaptive)\ngrep -n -A 5 -B 5 \"<search_pattern>\" <filepath> | head -50\n\n# Search across multiple files\ngrep -n -H \"<search_term>\" <path_pattern>/*.py | head -30\n```\n\n## Documentation Workflow with Memory Protection\n\n### Phase 1: File Size Assessment\n```bash\n# MANDATORY first step for all files\nls -lh docs/*.md | awk '{print $9, $5}' # List files with sizes\nfind . -name \"*.md\" -size +100k # Find large documentation files\n```\n\n### Phase 2: Strategic Sampling\n```bash\n# Sample without full reading\ngrep -n \"^#\" docs/*.md | head -50 # Get section headers\ngrep -n \"```\" docs/*.md | wc -l # Count code blocks\n```\n\n### Phase 3: Pattern Extraction with Summarization\n```python\n# Process with thresholds\nfor doc_file in documentation_files[:5]: # MAX 5 files\n size = check_file_size(doc_file)\n if size > 100_000:\n summary = auto_summarize_without_reading(doc_file)\n elif size > 20_000:\n patterns = extract_with_chunking(doc_file)\n summary = summarize_patterns(patterns)\n else:\n patterns = quick_extract(doc_file)\n \n # IMMEDIATELY discard all content\n clear_memory()\n```\n\n## Documentation-Specific Todo Patterns\n\n**Memory-Safe Documentation**:\n- `[Documentation] Document API with chunked processing`\n- `[Documentation] Create guide using pattern extraction`\n- `[Documentation] Generate docs with file size checks`\n\n**Pattern-Based Documentation**:\n- `[Documentation] Extract and document patterns (<5 files)`\n- `[Documentation] Summarize large documentation sets`\n- `[Documentation] Create overview from sampled 
content`\n\n## Documentation Memory Categories\n\n**Pattern Memories**: Content organization patterns (NOT full content)\n**Extraction Memories**: Key documentation structures only\n**Summary Memories**: Condensed overviews, not full text\n**Reference Memories**: Line numbers and file paths only\n**Threshold Memories**: File size limits and triggers\n\n## Quality Standards with Memory Protection\n\n- **Accuracy**: Line references without full file retention\n- **Efficiency**: Pattern extraction over full reading\n- **Safety**: File size checks before ALL operations\n- **Summarization**: Mandatory for content >20KB\n- **Chunking**: Required for files >100 lines\n- **Discarding**: Immediate after pattern extraction",
72
77
  "knowledge": {
73
78
  "domain_expertise": [
74
- "Memory-efficient documentation generation with immediate summarization",
75
- "Technical writing standards",
76
- "Documentation frameworks",
77
- "API documentation best practices",
78
- "Changelog generation techniques",
79
- "User experience writing",
80
- "MCP document summarization",
81
- "Precise code referencing with line numbers",
82
- "Strategic file sampling for documentation patterns",
83
- "Sequential processing to prevent memory accumulation",
84
- "Content threshold management (20KB/200 lines triggers summarization)",
85
- "Progressive summarization for cumulative content management"
79
+ "Memory-efficient documentation with MANDATORY file size pre-checking",
80
+ "Immediate summarization at 20KB/200 line thresholds",
81
+ "Progressive summarization for cumulative content (50KB/3 files)",
82
+ "Critical file handling (>100KB auto-summarized, >1MB skipped)",
83
+ "Implementation chunking in <100 line segments",
84
+ "Adaptive grep context based on match count for memory efficiency",
85
+ "Pattern extraction with immediate content discard",
86
+ "Technical writing standards with memory constraints",
87
+ "Documentation frameworks optimized for large codebases",
88
+ "API documentation through strategic sampling only",
89
+ "MCP document summarizer integration for threshold management",
90
+ "Precise code referencing with line numbers without full retention",
91
+ "Sequential processing to prevent parallel memory accumulation",
92
+ "Forbidden practice enforcement (no parallel loads, no retention)"
86
93
  ],
87
94
  "best_practices": [
88
- "Extract key patterns from 3-5 representative files maximum for documentation",
95
+ "ALWAYS check file size with LS before any Read operation",
96
+ "Extract key patterns from 3-5 representative files maximum",
89
97
  "Use grep with line numbers (-n) and adaptive context based on match count",
90
- "Leverage MCP summarizer tool for files exceeding thresholds",
91
- "Trigger summarization at 20KB or 200 lines for single files",
98
+ "Leverage MCP summarizer tool for ALL files exceeding thresholds",
99
+ "Trigger MANDATORY summarization at 20KB or 200 lines for single files",
92
100
  "Apply batch summarization after 3 files or 50KB cumulative content",
93
- "Process files sequentially to prevent memory accumulation",
94
- "Check file sizes before reading - auto-summarize >100KB files",
101
+ "Process files sequentially - NEVER in parallel",
102
+ "Auto-summarize >100KB files WITHOUT reading them",
103
+ "Skip or defer files >1MB entirely",
95
104
  "Reset cumulative counters after batch summarization",
96
- "Extract and summarize patterns immediately, discard full file contents",
105
+ "Extract patterns and IMMEDIATELY discard full file contents",
106
+ "Use adaptive grep context: >50 matches (-A 2 -B 2 | head -50), <20 matches (-A 10 -B 10)",
107
+ "Process large files in <100 line chunks with immediate discard",
97
108
  "Create clear technical documentation with precise line references",
98
- "Generate comprehensive API documentation from sampled patterns",
99
- "Write user-friendly guides and tutorials",
100
- "Maintain documentation consistency",
101
- "Structure complex information effectively",
109
+ "Generate comprehensive API documentation from sampled patterns only",
110
+ "NEVER accumulate content across multiple file reads",
102
111
  "Always use grep -n for line number tracking in code references",
103
- "Generate executive summaries when appropriate"
112
+ "Use targeted grep searches instead of full file reads",
113
+ "Implement progressive summarization for cumulative content management"
104
114
  ],
105
115
  "constraints": [
116
+ "❌ NEVER read entire large codebases",
117
+ "❌ NEVER load multiple files in parallel",
118
+ "❌ NEVER retain file contents after extraction",
119
+ "❌ NEVER load files >1MB into memory",
120
+ "❌ NEVER accumulate content across multiple file reads",
121
+ "❌ NEVER skip file size checks before reading",
122
+ "❌ NEVER process >5 files without summarization",
106
123
  "Process files sequentially to prevent memory accumulation",
107
124
  "Maximum 3-5 files for documentation analysis without summarization",
108
- "Critical files >100KB must be summarized, never fully read",
109
- "Single file threshold: 20KB or 200 lines triggers summarization",
125
+ "Critical files >100KB MUST be summarized, NEVER fully read",
126
+ "Single file threshold: 20KB or 200 lines triggers MANDATORY summarization",
110
127
  "Cumulative threshold: 50KB total or 3 files triggers batch summarization",
111
- "Adaptive grep context: >50 matches use -A 2 -B 2 | head -50",
112
- "Content must be discarded after extraction",
113
- "Never retain full file contents in memory",
128
+ "Adaptive grep context: >50 matches use -A 2 -B 2 | head -50, <20 matches use -A 10 -B 10",
129
+ "Content MUST be discarded IMMEDIATELY after extraction",
130
+ "File size checking is MANDATORY before ALL Read operations",
114
131
  "Check MCP summarizer tool availability before use",
115
- "Provide graceful fallback when MCP tool is not available",
116
132
  "Always include line numbers in code references",
117
- "Sequential processing is mandatory for documentation generation"
133
+ "Implementation chunking: Process large files in <100 line segments",
134
+ "Sequential processing is MANDATORY for documentation generation"
118
135
  ],
119
136
  "examples": []
120
137
  },
@@ -37,17 +37,6 @@
37
37
  },
38
38
  "capabilities": {
39
39
  "model": "opus",
40
- "tools": [
41
- "Read",
42
- "Grep",
43
- "Glob",
44
- "LS",
45
- "WebSearch",
46
- "WebFetch",
47
- "Bash",
48
- "TodoWrite",
49
- "mcp__claude-mpm-gateway__document_summarizer"
50
- ],
51
40
  "resource_tier": "high",
52
41
  "temperature": 0.2,
53
42
  "max_tokens": 16384,
@@ -83,15 +83,26 @@ def main(argv: Optional[list] = None):
83
83
  # Initialize or update project registry
84
84
  _initialize_project_registry()
85
85
 
86
- # Verify MCP Gateway configuration on startup (non-blocking)
87
- _verify_mcp_gateway_startup()
88
-
89
- # Create parser with version
86
+ # Parse args early to check if we should skip auto-configuration
87
+ # (for commands like --version, --help, etc.)
90
88
  parser = create_parser(version=__version__)
91
-
92
- # Preprocess and parse arguments
93
89
  processed_argv = preprocess_args(argv)
94
90
  args = parser.parse_args(processed_argv)
91
+
92
+ # Skip auto-configuration for certain commands
93
+ skip_auto_config_commands = ["--version", "-v", "--help", "-h"]
94
+ # sys is already imported at module level (line 16), use it directly
95
+ should_skip_auto_config = (
96
+ any(cmd in (processed_argv or sys.argv[1:]) for cmd in skip_auto_config_commands)
97
+ or (hasattr(args, 'command') and args.command in ["info", "doctor", "config", "mcp"]) # Info, diagnostic, and MCP commands
98
+ )
99
+
100
+ if not should_skip_auto_config:
101
+ # Check for MCP auto-configuration (pipx installations)
102
+ _check_mcp_auto_configuration()
103
+
104
+ # Verify MCP Gateway configuration on startup (non-blocking)
105
+ _verify_mcp_gateway_startup()
95
106
 
96
107
  # Set up logging
97
108
  # Special case: For MCP start command, we need minimal logging to avoid stdout interference
@@ -101,7 +112,7 @@ def main(argv: Optional[list] = None):
101
112
  ):
102
113
  # For MCP server, configure minimal stderr-only logging
103
114
  import logging
104
- import sys
115
+ # sys is already imported at module level
105
116
 
106
117
  # Only log errors to stderr for MCP server
107
118
  if not getattr(args, "test", False) and not getattr(
@@ -176,55 +187,122 @@ def _initialize_project_registry():
176
187
  # Continue execution - registry failure shouldn't block startup
177
188
 
178
189
 
190
def _check_mcp_auto_configuration():
    """
    Check and potentially auto-configure MCP for pipx installations.

    WHY: Users installing via pipx should have MCP work out-of-the-box with
    minimal friction. This function offers one-time auto-configuration with
    user consent.

    DESIGN DECISION: This is blocking but quick - it only runs once and has
    a 10-second timeout. We want to catch users on first run for the best
    experience.
    """
    try:
        from ..services.mcp_gateway.auto_configure import check_and_configure_mcp

        # check_and_configure_mcp() encapsulates the whole decision tree:
        # is MCP already configured, is this a pipx install, was the user
        # already asked before — and, when appropriate, prompts the user
        # and applies the configuration on consent.
        check_and_configure_mcp()

    except Exception as e:
        # Non-critical: auto-configuration must never block CLI startup.
        from ..core.logger import get_logger

        get_logger("cli").debug(f"MCP auto-configuration check failed: {e}")
218
+
219
+
179
220
  def _verify_mcp_gateway_startup():
180
221
  """
181
- Verify MCP Gateway configuration on startup.
222
+ Verify MCP Gateway configuration on startup and pre-warm MCP services.
182
223
 
183
224
  WHY: The MCP gateway should be automatically configured and verified on startup
184
225
  to provide a seamless experience with diagnostic tools, file summarizer, and
185
- ticket service.
226
+ ticket service. Pre-warming MCP services eliminates the 11.9s delay on first use.
186
227
 
187
228
  DESIGN DECISION: This is non-blocking - failures are logged but don't prevent
188
229
  startup to ensure claude-mpm remains functional even if MCP gateway has issues.
189
230
  """
190
231
  try:
191
232
  import asyncio
233
+ import time
192
234
  from ..services.mcp_gateway.core.startup_verification import (
193
235
  verify_mcp_gateway_on_startup,
194
236
  is_mcp_gateway_configured,
195
237
  )
238
+ from ..services.mcp_gateway.core.process_pool import pre_warm_mcp_servers
239
+ from ..core.logger import get_logger
240
+
241
+ logger = get_logger("mcp_prewarm")
196
242
 
197
243
  # Quick check first - if already configured, skip detailed verification
198
- if is_mcp_gateway_configured():
199
- return
200
-
201
- # Run detailed verification in background
202
- # Note: We don't await this to avoid blocking startup
203
- def run_verification():
244
+ gateway_configured = is_mcp_gateway_configured()
245
+
246
+ # Pre-warm MCP servers regardless of gateway config
247
+ # This eliminates the 11.9s delay on first agent invocation
248
+ def run_pre_warming():
204
249
  try:
250
+ start_time = time.time()
205
251
  loop = asyncio.new_event_loop()
206
252
  asyncio.set_event_loop(loop)
207
- results = loop.run_until_complete(verify_mcp_gateway_on_startup())
253
+
254
+ # Pre-warm MCP servers (especially vector search)
255
+ logger.info("Pre-warming MCP servers to eliminate startup delay...")
256
+ loop.run_until_complete(pre_warm_mcp_servers())
257
+
258
+ pre_warm_time = time.time() - start_time
259
+ if pre_warm_time > 1.0:
260
+ logger.info(f"MCP servers pre-warmed in {pre_warm_time:.2f}s")
261
+
262
+ # Also run gateway verification if needed
263
+ if not gateway_configured:
264
+ results = loop.run_until_complete(verify_mcp_gateway_on_startup())
265
+
208
266
  loop.close()
209
-
210
- # Log results but don't block
211
- from ..core.logger import get_logger
212
- logger = get_logger("cli")
213
-
214
- if results.get("gateway_configured"):
215
- logger.debug("MCP Gateway verification completed successfully")
216
- else:
217
- logger.debug("MCP Gateway verification completed with warnings")
218
-
219
267
  except Exception as e:
220
- from ..core.logger import get_logger
221
- logger = get_logger("cli")
222
- logger.debug(f"MCP Gateway verification failed: {e}")
223
-
224
- # Run in background thread to avoid blocking startup
268
+ # Non-blocking - log but don't fail
269
+ logger.debug(f"MCP pre-warming error (non-critical): {e}")
270
+
271
+ # Run pre-warming in background thread
225
272
  import threading
226
- verification_thread = threading.Thread(target=run_verification, daemon=True)
227
- verification_thread.start()
273
+ pre_warm_thread = threading.Thread(target=run_pre_warming, daemon=True)
274
+ pre_warm_thread.start()
275
+
276
+ return
277
+
278
+ # Run detailed verification in background if not configured
279
+ if not gateway_configured:
280
+ # Note: We don't await this to avoid blocking startup
281
+ def run_verification():
282
+ try:
283
+ loop = asyncio.new_event_loop()
284
+ asyncio.set_event_loop(loop)
285
+ results = loop.run_until_complete(verify_mcp_gateway_on_startup())
286
+ loop.close()
287
+
288
+ # Log results but don't block
289
+ from ..core.logger import get_logger
290
+ logger = get_logger("cli")
291
+
292
+ if results.get("gateway_configured"):
293
+ logger.debug("MCP Gateway verification completed successfully")
294
+ else:
295
+ logger.debug("MCP Gateway verification completed with warnings")
296
+
297
+ except Exception as e:
298
+ from ..core.logger import get_logger
299
+ logger = get_logger("cli")
300
+ logger.debug(f"MCP Gateway verification failed: {e}")
301
+
302
+ # Run in background thread to avoid blocking startup
303
+ import threading
304
+ verification_thread = threading.Thread(target=run_verification, daemon=True)
305
+ verification_thread.start()
228
306
 
229
307
  except Exception as e:
230
308
  # Import logger here to avoid circular imports
@@ -181,14 +181,15 @@ class AgentManagerCommand(AgentCommand):
181
181
  """Deploy an agent to specified tier."""
182
182
  try:
183
183
  agent_id = args.agent_id
184
- tier = getattr(args, 'tier', 'user')
184
+ tier = getattr(args, 'tier', 'project') # Default to project (changed from 'user')
185
185
 
186
- # Determine deployment path
187
- if tier == 'project':
188
- deploy_path = Path.cwd() / ".claude" / "agents"
189
- elif tier == 'user':
190
- deploy_path = Path.home() / ".claude" / "agents"
191
- else:
186
+ # Always deploy to project directory
187
+ # Regardless of tier, all agents go to project .claude/agents
188
+ deploy_path = Path.cwd() / ".claude" / "agents"
189
+
190
+ # Note: We're keeping the tier parameter for backward compatibility
191
+ # but it no longer affects the deployment location
192
+ if tier not in ['project', 'user']:
192
193
  return CommandResult.error_result("Invalid tier. Use 'project' or 'user'")
193
194
 
194
195
  # Create directory if needed
@@ -203,7 +204,8 @@ class AgentManagerCommand(AgentCommand):
203
204
  return CommandResult.error_result(f"Agent '{agent_id}' not found")
204
205
 
205
206
  # Deploy using deployment service
206
- self.deployment.deploy_agent(agent_id, str(deploy_path))
207
+ # Pass Path object, not string
208
+ self.deployment.deploy_agent(agent_id, deploy_path)
207
209
 
208
210
  return CommandResult.success_result(
209
211
  f"Agent '{agent_id}' deployed to {tier} level"
@@ -71,6 +71,7 @@ class AgentsCommand(AgentCommand):
71
71
  "deps-install": self._install_agent_dependencies,
72
72
  "deps-list": self._list_agent_dependencies,
73
73
  "deps-fix": self._fix_agent_dependencies,
74
+ "cleanup-orphaned": self._cleanup_orphaned_agents,
74
75
  }
75
76
 
76
77
  if args.agents_command in command_map:
@@ -469,6 +470,87 @@ class AgentsCommand(AgentCommand):
469
470
  except Exception as e:
470
471
  self.logger.error(f"Error fixing dependencies: {e}", exc_info=True)
471
472
  return CommandResult.error_result(f"Error fixing dependencies: {e}")
473
+
474
+ def _cleanup_orphaned_agents(self, args) -> CommandResult:
475
+ """Clean up orphaned agents that don't have templates."""
476
+ try:
477
+ from ...services.agents.deployment.multi_source_deployment_service import (
478
+ MultiSourceAgentDeploymentService
479
+ )
480
+
481
+ # Determine agents directory
482
+ if hasattr(args, 'agents_dir') and args.agents_dir:
483
+ agents_dir = args.agents_dir
484
+ else:
485
+ # Check for project-level .claude/agents first
486
+ project_agents_dir = Path.cwd() / ".claude" / "agents"
487
+ if project_agents_dir.exists():
488
+ agents_dir = project_agents_dir
489
+ else:
490
+ # Fall back to user home directory
491
+ agents_dir = Path.home() / ".claude" / "agents"
492
+
493
+ if not agents_dir.exists():
494
+ return CommandResult.success_result(f"Agents directory not found: {agents_dir}")
495
+
496
+ # Initialize service
497
+ service = MultiSourceAgentDeploymentService()
498
+
499
+ # Determine if we're doing a dry run
500
+ dry_run = getattr(args, 'dry_run', True)
501
+ if hasattr(args, 'force') and args.force:
502
+ dry_run = False
503
+
504
+ # Perform cleanup
505
+ results = service.cleanup_orphaned_agents(agents_dir, dry_run=dry_run)
506
+
507
+ output_format = getattr(args, 'format', 'text')
508
+ quiet = getattr(args, 'quiet', False)
509
+
510
+ if output_format in ['json', 'yaml']:
511
+ return CommandResult.success_result(
512
+ f"Found {len(results.get('orphaned', []))} orphaned agents",
513
+ data=results
514
+ )
515
+ else:
516
+ # Text output
517
+ if not results.get("orphaned"):
518
+ print("✅ No orphaned agents found")
519
+ return CommandResult.success_result("No orphaned agents found")
520
+
521
+ if not quiet:
522
+ print(f"\nFound {len(results['orphaned'])} orphaned agent(s):")
523
+ for orphan in results["orphaned"]:
524
+ print(f" - {orphan['name']} v{orphan['version']}")
525
+
526
+ if dry_run:
527
+ print(
528
+ f"\n📝 This was a dry run. Use --force to actually remove "
529
+ f"{len(results['orphaned'])} orphaned agent(s)"
530
+ )
531
+ else:
532
+ if results.get("removed"):
533
+ print(
534
+ f"\n✅ Successfully removed {len(results['removed'])} orphaned agent(s)"
535
+ )
536
+
537
+ if results.get("errors"):
538
+ print(f"\n❌ Encountered {len(results['errors'])} error(s):")
539
+ for error in results["errors"]:
540
+ print(f" - {error}")
541
+ return CommandResult.error_result(
542
+ f"Cleanup completed with {len(results['errors'])} errors",
543
+ data=results
544
+ )
545
+
546
+ return CommandResult.success_result(
547
+ f"Cleanup {'preview' if dry_run else 'completed'}",
548
+ data=results
549
+ )
550
+
551
+ except Exception as e:
552
+ self.logger.error(f"Error during cleanup: {e}", exc_info=True)
553
+ return CommandResult.error_result(f"Error during cleanup: {e}")
472
554
 
473
555
 
474
556
  def manage_agents(args):