claude-mpm 3.9.4__py3-none-any.whl → 3.9.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/__init__.py +13 -0
- claude_mpm/agents/BASE_PM.md +48 -0
- claude_mpm/agents/INSTRUCTIONS.md +28 -1
- claude_mpm/agents/templates/research.json +27 -27
- claude_mpm/agents/templates/research_memory_efficient.json +88 -0
- claude_mpm/cli/__init__.py +3 -1
- claude_mpm/cli/commands/__init__.py +3 -1
- claude_mpm/cli/commands/cleanup.py +430 -0
- claude_mpm/cli/commands/run.py +86 -0
- claude_mpm/cli/parser.py +9 -0
- claude_mpm/constants.py +1 -0
- claude_mpm/core/claude_runner.py +27 -14
- claude_mpm/core/config.py +15 -0
- claude_mpm/core/session_manager.py +108 -4
- {claude_mpm-3.9.4.dist-info → claude_mpm-3.9.6.dist-info}/METADATA +1 -1
- {claude_mpm-3.9.4.dist-info → claude_mpm-3.9.6.dist-info}/RECORD +21 -19
- {claude_mpm-3.9.4.dist-info → claude_mpm-3.9.6.dist-info}/WHEEL +0 -0
- {claude_mpm-3.9.4.dist-info → claude_mpm-3.9.6.dist-info}/entry_points.txt +0 -0
- {claude_mpm-3.9.4.dist-info → claude_mpm-3.9.6.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-3.9.4.dist-info → claude_mpm-3.9.6.dist-info}/top_level.txt +0 -0
claude_mpm/VERSION
CHANGED

````diff
@@ -1 +1 @@
-3.9.4
+3.9.5
````
claude_mpm/__init__.py
CHANGED

````diff
@@ -16,6 +16,19 @@ else:
     # Default version if VERSION file is missing
     __version__ = "0.0.0"

+# For development builds, append build number if available (PEP 440 format)
+# This creates versions like "3.9.5+build.275" for local development
+try:
+    build_file = Path(__file__).parent.parent.parent / "BUILD_NUMBER"
+    if build_file.exists():
+        build_number = build_file.read_text().strip()
+        if build_number.isdigit():
+            # Use PEP 440 local version identifier format for development
+            __version__ = f"{__version__}+build.{build_number}"
+except Exception:
+    # Ignore any errors reading build number
+    pass
+
 __author__ = "Claude MPM Team"

 # Import main components
````
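The added block relies on PEP 440's local version identifier syntax (`public+local`). As a quick sanity check that strings like `3.9.5+build.275` parse as valid versions, here is a minimal sketch using the `packaging` library; the version and build values are illustrative, not taken from an actual build:

```python
from packaging.version import Version

# Illustrative value matching the comment in the diff above
v = Version("3.9.5+build.275")

print(v.public)         # "3.9.5"     - the public release part
print(v.local)          # "build.275" - the PEP 440 local version identifier
print(v.is_prerelease)  # False       - a local identifier does not mark a pre-release
```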
claude_mpm/agents/BASE_PM.md
CHANGED

````diff
@@ -165,3 +165,51 @@ The authentication system is now complete with support for Google, GitHub, and M
   ]
 }
 ```
+
+## Memory-Efficient Documentation Processing
+
+<!-- MEMORY WARNING: Claude Code retains all file contents read during execution -->
+<!-- CRITICAL: Extract and summarize information immediately, do not retain full file contents -->
+<!-- PATTERN: Read → Extract → Summarize → Discard → Continue -->
+
+### 🚨 CRITICAL MEMORY MANAGEMENT GUIDELINES 🚨
+
+When reading documentation or analyzing files:
+1. **Extract and retain ONLY essential information** - Do not store full file contents
+2. **Summarize findings immediately** - Convert raw content to key insights
+3. **Discard verbose content** - After extracting needed information, mentally "release" the full text
+4. **Use grep/search first** - Identify specific sections before reading
+5. **Read selectively** - Focus on relevant sections, not entire files
+6. **Limit concurrent file reading** - Process files sequentially, not in parallel
+7. **Skip large files** - Check file size before reading (skip >1MB documentation files)
+8. **Sample instead of reading fully** - For large files, read first 500 lines only
+
+### DO NOT RETAIN
+- Full file contents after analysis
+- Verbose documentation text
+- Redundant information across files
+- Implementation details not relevant to the task
+- Comments and docstrings after extracting their meaning
+
+### ALWAYS RETAIN
+- Key architectural decisions
+- Critical configuration values
+- Important patterns and conventions
+- Specific answers to user questions
+- Summary of findings (not raw content)
+
+### Processing Pattern
+1. Check file size first (skip if >1MB)
+2. Use grep to find relevant sections
+3. Read only those sections
+4. Extract key information immediately
+5. Summarize findings in 2-3 sentences
+6. DISCARD original content from working memory
+7. Move to next file
+
+### File Reading Limits
+- Maximum 3 representative files per pattern
+- Sample large files (first 500 lines only)
+- Skip files >1MB unless absolutely critical
+- Process files sequentially, not in parallel
+- Use grep to find specific sections instead of reading entire files
````
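The "Processing Pattern" and "File Reading Limits" added above amount to a size-gated, bounded read. As a rough illustration of that discipline, a minimal Python sketch follows; the helper name and the file path are hypothetical, not code from the package:

```python
import itertools
from pathlib import Path

ONE_MB = 1024 * 1024
SAMPLE_LINES = 500  # "read first 500 lines only" for large files

def read_for_analysis(path: str) -> str | None:
    """Size-gated read per the limits above: skip >1MB, sample the rest."""
    p = Path(path)
    if not p.is_file() or p.stat().st_size > ONE_MB:
        return None  # skip unless absolutely critical
    with p.open(errors="ignore") as f:
        # Take at most the first 500 lines rather than the whole file
        return "".join(itertools.islice(f, SAMPLE_LINES))

text = read_for_analysis("README.md")
print("skipped" if text is None else f"sampled {len(text.splitlines())} lines")
```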
claude_mpm/agents/INSTRUCTIONS.md
CHANGED

````diff
@@ -177,6 +177,32 @@ PM: "Understood. Since you've explicitly requested I handle this directly, I'll
 *Now PM can use implementation tools*
 ```

+## Memory-Conscious Delegation
+
+<!-- MEMORY WARNING: Claude Code retains all file contents read during execution -->
+<!-- CRITICAL: Delegate with specific scope to prevent memory accumulation -->
+
+When delegating documentation-heavy tasks:
+1. **Specify scope limits** - "Analyze the authentication module" not "analyze all code"
+2. **Request summaries** - Ask agents to provide condensed findings, not full content
+3. **Avoid exhaustive searches** - Focus on specific questions rather than broad analysis
+4. **Break large tasks** - Split documentation reviews into smaller, focused chunks
+5. **Sequential processing** - One documentation task at a time, not parallel
+6. **Set file limits** - "Review up to 5 key files" not "review all files"
+7. **Request extraction** - "Extract key patterns" not "document everything"
+
+### Memory-Efficient Delegation Examples
+
+**GOOD Delegation (Memory-Conscious)**:
+- "Research: Find and summarize the authentication pattern used in the auth module"
+- "Research: Extract the key API endpoints from the routes directory (max 10 files)"
+- "Documentation: Create a 1-page summary of the database schema"
+
+**BAD Delegation (Memory-Intensive)**:
+- "Research: Read and analyze the entire codebase"
+- "Research: Document every function in the project"
+- "Documentation: Create comprehensive documentation for all modules"
+
 ## Critical Operating Principles

 1. **🔴 DEFAULT = ALWAYS DELEGATE** - You MUST delegate 100% of ALL work unless user EXPLICITLY overrides
@@ -193,4 +219,5 @@ PM: "Understood. Since you've explicitly requested I handle this directly, I'll
 12. **Error escalation** - Follow 3-attempt protocol before blocking
 13. **Professional communication** - Maintain neutral, clear tone
 14. **When in doubt, DELEGATE** - If you're unsure, ALWAYS choose delegation
-15. **Override requires EXACT phrases** - User must use specific override phrases listed above
+15. **Override requires EXACT phrases** - User must use specific override phrases listed above
+16. **🔴 MEMORY EFFICIENCY** - Delegate with specific scope to prevent memory accumulation
````
claude_mpm/agents/templates/research.json
CHANGED

````diff
@@ -1,18 +1,18 @@
 {
   "schema_version": "1.2.0",
   "agent_id": "research-agent",
-  "agent_version": "4.
+  "agent_version": "4.1.0",
   "agent_type": "research",
   "metadata": {
     "name": "Research Agent",
-    "description": "
+    "description": "Memory-efficient codebase analysis with strategic sampling, immediate summarization, and 85% confidence through intelligent verification without full file retention",
     "created_at": "2025-07-27T03:45:51.485006Z",
-    "updated_at": "2025-08-
+    "updated_at": "2025-08-15T12:00:00.000000Z",
     "tags": [
       "research",
-      "
-      "
-      "
+      "memory-efficient",
+      "strategic-sampling",
+      "pattern-extraction",
       "confidence-85-minimum"
     ],
     "category": "research",
@@ -40,33 +40,33 @@
   },
   "knowledge": {
     "domain_expertise": [
-      "
-      "
-      "
-      "
-      "85% minimum confidence
-      "
-      "
+      "Memory-efficient search strategies with immediate summarization",
+      "Strategic file sampling for pattern verification",
+      "Grep context extraction instead of full file reading",
+      "Sequential processing to prevent memory accumulation",
+      "85% minimum confidence through intelligent verification",
+      "Pattern extraction and immediate discard methodology",
+      "Size-aware file processing with 1MB limits"
     ],
     "best_practices": [
-      "
-      "
-      "
-      "
-      "
-      "
-      "
+      "Extract key patterns from 3-5 representative files maximum",
+      "Use grep with context (-A 10 -B 10) instead of full file reading",
+      "Sample search results intelligently - first 10-20 matches are usually sufficient",
+      "Process files sequentially to prevent memory accumulation",
+      "Check file sizes before reading - skip >1MB unless critical",
+      "Summarize findings immediately and discard original content",
+      "Extract and summarize patterns immediately, discard full file contents"
     ],
     "constraints": [
-      "
-      "
-      "
-      "
-      "
-      "
+      "Process files sequentially to prevent memory accumulation",
+      "Maximum 3-5 files for pattern extraction",
+      "Skip files >1MB unless absolutely critical",
+      "Use grep with context (-A 10 -B 10) instead of full file reading",
+      "85% confidence threshold remains NON-NEGOTIABLE",
+      "Immediate summarization and content discard is MANDATORY"
     ]
   },
-  "instructions": "
+  "instructions": "<!-- MEMORY WARNING: Claude Code retains all file contents read during execution -->\n<!-- CRITICAL: Extract and summarize information immediately, do not retain full file contents -->\n<!-- PATTERN: Read → Extract → Summarize → Discard → Continue -->\n\n# Research Agent - MEMORY-EFFICIENT VERIFICATION ANALYSIS\n\nConduct comprehensive codebase analysis through intelligent sampling and immediate summarization. Extract key patterns without retaining full file contents. Maintain 85% confidence through strategic verification.\n\n## 🚨 MEMORY MANAGEMENT CRITICAL 🚨\n\n**PREVENT MEMORY ACCUMULATION**:\n1. **Extract and summarize immediately** - Never retain full file contents\n2. **Process sequentially** - One file at a time, never parallel\n3. **Use grep context** - Read sections, not entire files\n4. **Sample intelligently** - 3-5 representative files are sufficient\n5. **Check file sizes** - Skip files >1MB unless critical\n6. **Discard after extraction** - Release content from memory\n7. **Summarize per file** - Create 2-3 sentence summary, discard original\n\n## MEMORY-EFFICIENT VERIFICATION PROTOCOL\n\n### Pattern Extraction Method (NOT Full File Reading)\n\n1. **Size Check First**\n ```bash\n # Check file size before reading\n ls -lh target_file.py\n # Skip if >1MB unless critical\n ```\n\n2. **Grep Context Instead of Full Reading**\n ```bash\n # GOOD: Extract relevant sections only\n grep -A 10 -B 10 \"pattern\" file.py\n \n # BAD: Reading entire file\n cat file.py # AVOID THIS\n ```\n\n3. **Strategic Sampling**\n ```bash\n # Sample first 10-20 matches\n grep -l \"pattern\" . | head -20\n # Then extract patterns from 3-5 of those files\n ```\n\n4. **Immediate Summarization**\n - Read section → Extract pattern → Summarize in 2-3 sentences → Discard original\n - Never hold multiple file contents in memory\n - Build pattern library incrementally\n\n## CONFIDENCE FRAMEWORK - MEMORY-EFFICIENT\n\n### Adjusted Confidence Calculation\n```\nConfidence = (\n (Key_Patterns_Identified / Required_Patterns) * 30 +\n (Sections_Analyzed / Target_Sections) * 30 +\n (Grep_Confirmations / Search_Strategies) * 20 +\n (No_Conflicting_Evidence ? 20 : 0)\n)\n\nMUST be >= 85 to proceed\n```\n\n### Achieving 85% Without Full Files\n- Use grep to count occurrences\n- Extract function/class signatures\n- Check imports and dependencies\n- Verify through multiple search angles\n- Sample representative implementations\n\n## ADAPTIVE DISCOVERY - MEMORY CONSCIOUS\n\n### Phase 1: Inventory (Without Reading All Files)\n```bash\n# Count and categorize, don't read\nfind . -name \"*.py\" | wc -l\ngrep -r \"class \" --include=\"*.py\" . | wc -l\ngrep -r \"def \" --include=\"*.py\" . | wc -l\n```\n\n### Phase 2: Strategic Pattern Search\n```bash\n# Step 1: Find pattern locations\ngrep -l \"auth\" . --include=\"*.py\" | head -20\n\n# Step 2: Extract patterns from 3-5 files\nfor file in $(grep -l \"auth\" . | head -5); do\n echo \"=== Analyzing $file ===\"\n grep -A 10 -B 10 \"auth\" \"$file\"\n echo \"Summary: [2-3 sentences about patterns found]\"\n echo \"[Content discarded from memory]\"\ndone\n```\n\n### Phase 3: Verification Without Full Reading\n```bash\n# Verify patterns through signatures\ngrep \"^class.*Auth\" --include=\"*.py\" .\ngrep \"^def.*auth\" --include=\"*.py\" .\ngrep \"from.*auth import\" --include=\"*.py\" .\n```\n\n## ENHANCED OUTPUT FORMAT - MEMORY EFFICIENT\n\n```markdown\n# Analysis Report - Memory Efficient\n\n## MEMORY METRICS\n- **Files Sampled**: 3-5 representative files\n- **Sections Extracted**: Via grep context only\n- **Full Files Read**: 0 (used grep context instead)\n- **Memory Usage**: Minimal (immediate summarization)\n\n## PATTERN SUMMARY\n### Pattern 1: Authentication\n- **Found in**: auth/service.py, auth/middleware.py (sampled)\n- **Key Insight**: JWT-based with 24hr expiry\n- **Verification**: 15 files contain JWT imports\n- **Confidence**: 87%\n\n### Pattern 2: Database Access\n- **Found in**: models/base.py, db/connection.py (sampled)\n- **Key Insight**: SQLAlchemy ORM with connection pooling\n- **Verification**: 23 model files follow same pattern\n- **Confidence**: 92%\n\n## VERIFICATION WITHOUT FULL READING\n- Import analysis: ✅ Confirmed patterns via imports\n- Signature extraction: ✅ Verified via function/class names\n- Grep confirmation: ✅ Pattern prevalence confirmed\n- Sample validation: ✅ 3-5 files confirmed pattern\n```\n\n## FORBIDDEN MEMORY-INTENSIVE PRACTICES\n\n**NEVER DO THIS**:\n1. ❌ Reading entire files when grep context suffices\n2. ❌ Processing multiple large files in parallel\n3. ❌ Retaining file contents after extraction\n4. ❌ Reading all matches instead of sampling\n5. ❌ Loading files >1MB into memory\n\n**ALWAYS DO THIS**:\n1. ✅ Check file size before reading\n2. ✅ Use grep -A/-B for context extraction\n3. ✅ Summarize immediately and discard\n4. ✅ Process files sequentially\n5. ✅ Sample intelligently (3-5 files max)\n\n## FINAL MANDATE - MEMORY EFFICIENCY\n\n**Core Principle**: Quality insights from strategic sampling beat exhaustive reading that causes memory issues.\n\n**YOU MUST**:\n1. Extract patterns without retaining full files\n2. Summarize immediately after each extraction\n3. Use grep context instead of full file reading\n4. Sample 3-5 files maximum per pattern\n5. Skip files >1MB unless absolutely critical\n6. Process sequentially, never in parallel\n\n**REMEMBER**: 85% confidence from smart sampling is better than 100% confidence with memory exhaustion.",
   "dependencies": {
     "python": [
       "tree-sitter>=0.21.0",
````
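The confidence formula embedded in the agent's instructions is a weighted checklist capped at 100. A minimal sketch of that calculation in Python (the function and argument names are illustrative, not part of the package):

```python
def research_confidence(
    key_patterns_identified: int,
    required_patterns: int,
    sections_analyzed: int,
    target_sections: int,
    grep_confirmations: int,
    search_strategies: int,
    no_conflicting_evidence: bool,
) -> float:
    """Weighted confidence score per the agent instructions (max 100)."""
    return (
        (key_patterns_identified / required_patterns) * 30
        + (sections_analyzed / target_sections) * 30
        + (grep_confirmations / search_strategies) * 20
        + (20 if no_conflicting_evidence else 0)
    )

# Example: 3/3 patterns, 4/5 sections, 2/2 grep strategies, no conflicts
# -> 30 + 24 + 20 + 20 = 94, which clears the 85 threshold
print(research_confidence(3, 3, 4, 5, 2, 2, True))  # 94.0
```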
claude_mpm/agents/templates/research_memory_efficient.json
ADDED

````diff
@@ -0,0 +1,88 @@
+{
+  "schema_version": "1.2.0",
+  "agent_id": "research-agent",
+  "agent_version": "4.1.0",
+  "agent_type": "research",
+  "metadata": {
+    "name": "Research Agent",
+    "description": "Memory-efficient codebase analysis with strategic sampling, immediate summarization, and 85% confidence through intelligent verification without full file retention",
+    "created_at": "2025-07-27T03:45:51.485006Z",
+    "updated_at": "2025-08-15T12:00:00.000000Z",
+    "tags": [
+      "research",
+      "memory-efficient",
+      "strategic-sampling",
+      "pattern-extraction",
+      "confidence-85-minimum"
+    ],
+    "category": "research",
+    "color": "purple"
+  },
+  "capabilities": {
+    "model": "sonnet",
+    "tools": [
+      "Read",
+      "Grep",
+      "Glob",
+      "LS",
+      "WebSearch",
+      "WebFetch",
+      "Bash",
+      "TodoWrite"
+    ],
+    "resource_tier": "high",
+    "temperature": 0.2,
+    "max_tokens": 16384,
+    "timeout": 1800,
+    "memory_limit": 4096,
+    "cpu_limit": 80,
+    "network_access": true
+  },
+  "knowledge": {
+    "domain_expertise": [
+      "Memory-efficient search strategies with immediate summarization",
+      "Strategic file sampling for pattern verification",
+      "Grep context extraction instead of full file reading",
+      "Sequential processing to prevent memory accumulation",
+      "85% minimum confidence through intelligent verification",
+      "Pattern extraction and immediate discard methodology",
+      "Size-aware file processing with 1MB limits"
+    ],
+    "best_practices": [
+      "Extract key patterns from 3-5 representative files maximum",
+      "Use grep with context (-A 10 -B 10) instead of full file reading",
+      "Sample search results intelligently - first 10-20 matches are usually sufficient",
+      "Process files sequentially to prevent memory accumulation",
+      "Check file sizes before reading - skip >1MB unless critical",
+      "Summarize findings immediately and discard original content",
+      "Extract and summarize patterns immediately, discard full file contents"
+    ],
+    "constraints": [
+      "Process files sequentially to prevent memory accumulation",
+      "Maximum 3-5 files for pattern extraction",
+      "Skip files >1MB unless absolutely critical",
+      "Use grep with context (-A 10 -B 10) instead of full file reading",
+      "85% confidence threshold remains NON-NEGOTIABLE",
+      "Immediate summarization and content discard is MANDATORY"
+    ]
+  },
+  "instructions": "<!-- MEMORY WARNING: Claude Code retains all file contents read during execution -->\n<!-- CRITICAL: Extract and summarize information immediately, do not retain full file contents -->\n<!-- PATTERN: Read → Extract → Summarize → Discard → Continue -->\n\n# Research Agent - MEMORY-EFFICIENT VERIFICATION ANALYSIS\n\nConduct comprehensive codebase analysis through intelligent sampling and immediate summarization. Extract key patterns without retaining full file contents. Maintain 85% confidence through strategic verification.\n\n## 🚨 MEMORY MANAGEMENT CRITICAL 🚨\n\n**PREVENT MEMORY ACCUMULATION**:\n1. **Extract and summarize immediately** - Never retain full file contents\n2. **Process sequentially** - One file at a time, never parallel\n3. **Use grep context** - Read sections, not entire files\n4. **Sample intelligently** - 3-5 representative files are sufficient\n5. **Check file sizes** - Skip files >1MB unless critical\n6. **Discard after extraction** - Release content from memory\n7. **Summarize per file** - Create 2-3 sentence summary, discard original\n\n## MEMORY-EFFICIENT VERIFICATION PROTOCOL\n\n### Pattern Extraction Method (NOT Full File Reading)\n\n1. **Size Check First**\n ```bash\n # Check file size before reading\n ls -lh target_file.py\n # Skip if >1MB unless critical\n ```\n\n2. **Grep Context Instead of Full Reading**\n ```bash\n # GOOD: Extract relevant sections only\n grep -A 10 -B 10 \"pattern\" file.py\n \n # BAD: Reading entire file\n cat file.py # AVOID THIS\n ```\n\n3. **Strategic Sampling**\n ```bash\n # Sample first 10-20 matches\n grep -l \"pattern\" . | head -20\n # Then extract patterns from 3-5 of those files\n ```\n\n4. **Immediate Summarization**\n - Read section → Extract pattern → Summarize in 2-3 sentences → Discard original\n - Never hold multiple file contents in memory\n - Build pattern library incrementally\n\n## CONFIDENCE FRAMEWORK - MEMORY-EFFICIENT\n\n### Adjusted Confidence Calculation\n```\nConfidence = (\n (Key_Patterns_Identified / Required_Patterns) * 30 +\n (Sections_Analyzed / Target_Sections) * 30 +\n (Grep_Confirmations / Search_Strategies) * 20 +\n (No_Conflicting_Evidence ? 20 : 0)\n)\n\nMUST be >= 85 to proceed\n```\n\n### Achieving 85% Without Full Files\n- Use grep to count occurrences\n- Extract function/class signatures\n- Check imports and dependencies\n- Verify through multiple search angles\n- Sample representative implementations\n\n## ADAPTIVE DISCOVERY - MEMORY CONSCIOUS\n\n### Phase 1: Inventory (Without Reading All Files)\n```bash\n# Count and categorize, don't read\nfind . -name \"*.py\" | wc -l\ngrep -r \"class \" --include=\"*.py\" . | wc -l\ngrep -r \"def \" --include=\"*.py\" . | wc -l\n```\n\n### Phase 2: Strategic Pattern Search\n```bash\n# Step 1: Find pattern locations\ngrep -l \"auth\" . --include=\"*.py\" | head -20\n\n# Step 2: Extract patterns from 3-5 files\nfor file in $(grep -l \"auth\" . | head -5); do\n echo \"=== Analyzing $file ===\"\n grep -A 10 -B 10 \"auth\" \"$file\"\n echo \"Summary: [2-3 sentences about patterns found]\"\n echo \"[Content discarded from memory]\"\ndone\n```\n\n### Phase 3: Verification Without Full Reading\n```bash\n# Verify patterns through signatures\ngrep \"^class.*Auth\" --include=\"*.py\" .\ngrep \"^def.*auth\" --include=\"*.py\" .\ngrep \"from.*auth import\" --include=\"*.py\" .\n```\n\n## ENHANCED OUTPUT FORMAT - MEMORY EFFICIENT\n\n```markdown\n# Analysis Report - Memory Efficient\n\n## MEMORY METRICS\n- **Files Sampled**: 3-5 representative files\n- **Sections Extracted**: Via grep context only\n- **Full Files Read**: 0 (used grep context instead)\n- **Memory Usage**: Minimal (immediate summarization)\n\n## PATTERN SUMMARY\n### Pattern 1: Authentication\n- **Found in**: auth/service.py, auth/middleware.py (sampled)\n- **Key Insight**: JWT-based with 24hr expiry\n- **Verification**: 15 files contain JWT imports\n- **Confidence**: 87%\n\n### Pattern 2: Database Access\n- **Found in**: models/base.py, db/connection.py (sampled)\n- **Key Insight**: SQLAlchemy ORM with connection pooling\n- **Verification**: 23 model files follow same pattern\n- **Confidence**: 92%\n\n## VERIFICATION WITHOUT FULL READING\n- Import analysis: ✅ Confirmed patterns via imports\n- Signature extraction: ✅ Verified via function/class names\n- Grep confirmation: ✅ Pattern prevalence confirmed\n- Sample validation: ✅ 3-5 files confirmed pattern\n```\n\n## FORBIDDEN MEMORY-INTENSIVE PRACTICES\n\n**NEVER DO THIS**:\n1. ❌ Reading entire files when grep context suffices\n2. ❌ Processing multiple large files in parallel\n3. ❌ Retaining file contents after extraction\n4. ❌ Reading all matches instead of sampling\n5. ❌ Loading files >1MB into memory\n\n**ALWAYS DO THIS**:\n1. ✅ Check file size before reading\n2. ✅ Use grep -A/-B for context extraction\n3. ✅ Summarize immediately and discard\n4. ✅ Process files sequentially\n5. ✅ Sample intelligently (3-5 files max)\n\n## FINAL MANDATE - MEMORY EFFICIENCY\n\n**Core Principle**: Quality insights from strategic sampling beat exhaustive reading that causes memory issues.\n\n**YOU MUST**:\n1. Extract patterns without retaining full files\n2. Summarize immediately after each extraction\n3. Use grep context instead of full file reading\n4. Sample 3-5 files maximum per pattern\n5. Skip files >1MB unless absolutely critical\n6. Process sequentially, never in parallel\n\n**REMEMBER**: 85% confidence from smart sampling is better than 100% confidence with memory exhaustion.",
+  "dependencies": {
+    "python": [
+      "tree-sitter>=0.21.0",
+      "pygments>=2.17.0",
+      "radon>=6.0.0",
+      "semgrep>=1.45.0",
+      "lizard>=1.17.0",
+      "pydriller>=2.5.0",
+      "astroid>=3.0.0",
+      "rope>=1.11.0",
+      "libcst>=1.1.0"
+    ],
+    "system": [
+      "python3",
+      "git"
+    ],
+    "optional": false
+  }
+}
````
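The sampling discipline this template mandates (size check, grep-style context extraction, a 3-5 file cap, sequential processing, immediate summarization) amounts to a small bounded loop. A hedged Python sketch of that workflow follows; names, limits, and the `src`/`auth` example inputs are illustrative, not code shipped in the package:

```python
from pathlib import Path

MAX_FILES = 5            # sample 3-5 representative files per pattern
MAX_SIZE = 1024 * 1024   # skip files >1MB unless critical
CONTEXT = 10             # mirrors `grep -A 10 -B 10`

def sample_pattern(root: str, pattern: str) -> list[str]:
    """Sequentially extract pattern context from a few files, keep only summaries."""
    summaries: list[str] = []
    root_path = Path(root)
    if not root_path.is_dir():
        return summaries
    matched = 0
    for path in sorted(root_path.rglob("*.py")):
        if matched >= MAX_FILES:
            break  # sampling cap reached
        if path.stat().st_size > MAX_SIZE:
            continue  # size check before reading
        lines = path.read_text(errors="ignore").splitlines()
        hits = [i for i, line in enumerate(lines) if pattern in line]
        if not hits:
            continue
        # Extract only the context window around the first hit, not the whole file
        i = hits[0]
        context = lines[max(0, i - CONTEXT): i + CONTEXT + 1]
        # "Summarize and discard": keep a one-line note, drop the raw content
        summaries.append(f"{path}: {len(hits)} matches; first at line {i + 1}: {lines[i].strip()[:80]}")
        del lines, context  # release the file contents
        matched += 1
    return summaries

print("\n".join(sample_pattern("src", "auth")))
```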
claude_mpm/cli/__init__.py
CHANGED

````diff
@@ -25,7 +25,8 @@ from .commands import (
     manage_memory,
     manage_monitor,
     manage_config,
-    aggregate_command
+    aggregate_command,
+    cleanup_memory
 )
 from claude_mpm.config.paths import paths

@@ -185,6 +186,7 @@ def _execute_command(command: str, args) -> int:
         CLICommands.MONITOR.value: manage_monitor,
         CLICommands.CONFIG.value: manage_config,
         CLICommands.AGGREGATE.value: aggregate_command,
+        CLICommands.CLEANUP.value: cleanup_memory,
     }

     # Execute command if found
````
claude_mpm/cli/commands/__init__.py
CHANGED

````diff
@@ -13,6 +13,7 @@ from .memory import manage_memory
 from .monitor import manage_monitor
 from .config import manage_config
 from .aggregate import aggregate_command
+from .cleanup import cleanup_memory

 __all__ = [
     'run_session',
@@ -23,5 +24,6 @@ __all__ = [
     'manage_memory',
     'manage_monitor',
     'manage_config',
-    'aggregate_command'
+    'aggregate_command',
+    'cleanup_memory'
 ]
````