claude-mpm 4.3.20 → 4.3.22 (py3-none-any.whl)
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/agent_loader.py +2 -2
- claude_mpm/agents/agent_loader_integration.py +2 -2
- claude_mpm/agents/async_agent_loader.py +2 -2
- claude_mpm/agents/base_agent_loader.py +2 -2
- claude_mpm/agents/frontmatter_validator.py +2 -2
- claude_mpm/agents/system_agent_config.py +2 -2
- claude_mpm/agents/templates/data_engineer.json +1 -2
- claude_mpm/cli/commands/doctor.py +2 -2
- claude_mpm/cli/commands/mpm_init.py +560 -47
- claude_mpm/cli/commands/mpm_init_handler.py +6 -0
- claude_mpm/cli/parsers/mpm_init_parser.py +39 -1
- claude_mpm/cli/startup_logging.py +11 -9
- claude_mpm/commands/mpm-init.md +76 -12
- claude_mpm/config/agent_config.py +2 -2
- claude_mpm/config/paths.py +2 -2
- claude_mpm/core/agent_name_normalizer.py +2 -2
- claude_mpm/core/config.py +2 -1
- claude_mpm/core/config_aliases.py +2 -2
- claude_mpm/core/file_utils.py +1 -0
- claude_mpm/core/log_manager.py +2 -2
- claude_mpm/core/tool_access_control.py +2 -2
- claude_mpm/core/unified_agent_registry.py +2 -2
- claude_mpm/core/unified_paths.py +2 -2
- claude_mpm/experimental/cli_enhancements.py +3 -2
- claude_mpm/hooks/base_hook.py +2 -2
- claude_mpm/hooks/instruction_reinforcement.py +2 -2
- claude_mpm/hooks/validation_hooks.py +2 -2
- claude_mpm/scripts/mpm_doctor.py +2 -2
- claude_mpm/services/agents/loading/agent_profile_loader.py +2 -2
- claude_mpm/services/agents/loading/base_agent_manager.py +2 -2
- claude_mpm/services/agents/loading/framework_agent_loader.py +2 -2
- claude_mpm/services/agents/management/agent_capabilities_generator.py +2 -2
- claude_mpm/services/agents/management/agent_management_service.py +2 -2
- claude_mpm/services/agents/memory/memory_categorization_service.py +5 -2
- claude_mpm/services/agents/memory/memory_file_service.py +27 -6
- claude_mpm/services/agents/memory/memory_format_service.py +5 -2
- claude_mpm/services/agents/memory/memory_limits_service.py +3 -2
- claude_mpm/services/agents/registry/deployed_agent_discovery.py +2 -2
- claude_mpm/services/agents/registry/modification_tracker.py +4 -4
- claude_mpm/services/async_session_logger.py +2 -1
- claude_mpm/services/claude_session_logger.py +2 -2
- claude_mpm/services/core/path_resolver.py +3 -2
- claude_mpm/services/diagnostics/diagnostic_runner.py +4 -3
- claude_mpm/services/event_bus/direct_relay.py +2 -1
- claude_mpm/services/event_bus/event_bus.py +2 -1
- claude_mpm/services/event_bus/relay.py +2 -2
- claude_mpm/services/framework_claude_md_generator/content_assembler.py +2 -2
- claude_mpm/services/infrastructure/daemon_manager.py +2 -2
- claude_mpm/services/memory/cache/simple_cache.py +2 -2
- claude_mpm/services/project/archive_manager.py +981 -0
- claude_mpm/services/project/documentation_manager.py +536 -0
- claude_mpm/services/project/enhanced_analyzer.py +491 -0
- claude_mpm/services/project/project_organizer.py +904 -0
- claude_mpm/services/response_tracker.py +2 -2
- claude_mpm/services/socketio/handlers/connection.py +14 -33
- claude_mpm/services/socketio/server/eventbus_integration.py +2 -2
- claude_mpm/services/version_control/version_parser.py +5 -4
- claude_mpm/storage/state_storage.py +2 -2
- claude_mpm/utils/agent_dependency_loader.py +49 -0
- claude_mpm/utils/common.py +542 -0
- claude_mpm/utils/database_connector.py +298 -0
- claude_mpm/utils/error_handler.py +2 -1
- claude_mpm/utils/log_cleanup.py +2 -2
- claude_mpm/utils/path_operations.py +2 -2
- claude_mpm/utils/robust_installer.py +56 -0
- claude_mpm/utils/session_logging.py +2 -2
- claude_mpm/utils/subprocess_utils.py +2 -2
- claude_mpm/validation/agent_validator.py +2 -2
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.3.22.dist-info}/METADATA +1 -1
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.3.22.dist-info}/RECORD +75 -69
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.3.22.dist-info}/WHEEL +0 -0
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.3.22.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.3.22.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.3.22.dist-info}/top_level.txt +0 -0
claude_mpm/services/project/documentation_manager.py (new file)
@@ -0,0 +1,536 @@
+"""
+Documentation Manager Service for Claude MPM Project Initialization
+===================================================================
+
+This service manages CLAUDE.md documentation updates, merging, and intelligent
+content organization for the mpm-init command.
+
+Key Features:
+- Smart merging of existing CLAUDE.md content with new sections
+- Priority-based content organization (🔴🟡🟢⚪)
+- Content deduplication and conflict resolution
+- Template-based documentation generation
+- Version comparison and change tracking
+
+Author: Claude MPM Development Team
+Created: 2025-01-26
+"""
+
+import difflib
+import hashlib
+import json
+import re
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, List, Optional, Set, Tuple
+
+from rich.console import Console
+
+from claude_mpm.core.logging_utils import get_logger
+logger = get_logger(__name__)
+console = Console()
+
+
+class DocumentationManager:
+    """Manages CLAUDE.md documentation updates and organization."""
+
+    # Priority markers
+    PRIORITY_MARKERS = {
+        "critical": "🔴",
+        "important": "🟡",
+        "standard": "🟢",
+        "optional": "⚪",
+    }
+
+    # Section priority order (higher index = higher priority)
+    SECTION_PRIORITY = {
+        "priority_index": 100,
+        "critical_security": 95,
+        "critical_business": 90,
+        "important_architecture": 80,
+        "important_workflow": 75,
+        "project_overview": 70,
+        "standard_coding": 60,
+        "standard_tasks": 55,
+        "documentation_links": 40,
+        "optional_future": 20,
+        "meta_maintenance": 10,
+    }
+
+    def __init__(self, project_path: Path):
+        """Initialize the documentation manager."""
+        self.project_path = project_path
+        self.claude_md_path = project_path / "CLAUDE.md"
+        self.existing_content = None
+        self.content_hash = None
+        self._load_existing_content()
+
+    def _load_existing_content(self) -> None:
+        """Load existing CLAUDE.md content if it exists."""
+        if self.claude_md_path.exists():
+            self.existing_content = self.claude_md_path.read_text(encoding="utf-8")
+            self.content_hash = hashlib.md5(self.existing_content.encode()).hexdigest()
+            logger.info(f"Loaded existing CLAUDE.md ({len(self.existing_content)} chars)")
+
+    def has_existing_documentation(self) -> bool:
+        """Check if project has existing CLAUDE.md."""
+        return self.claude_md_path.exists()
+
+    def analyze_existing_content(self) -> Dict:
+        """Analyze existing CLAUDE.md structure and content."""
+        if not self.existing_content:
+            return {"exists": False}
+
+        analysis = {
+            "exists": True,
+            "size": len(self.existing_content),
+            "lines": self.existing_content.count("\n"),
+            "sections": self._extract_sections(self.existing_content),
+            "has_priority_index": "🎯 Priority Index" in self.existing_content,
+            "has_priority_markers": any(
+                marker in self.existing_content
+                for marker in self.PRIORITY_MARKERS.values()
+            ),
+            "last_modified": datetime.fromtimestamp(
+                self.claude_md_path.stat().st_mtime
+            ).isoformat(),
+            "content_hash": self.content_hash,
+        }
+
+        # Check for outdated patterns
+        analysis["outdated_patterns"] = self._check_outdated_patterns()
+
+        # Extract custom sections
+        analysis["custom_sections"] = self._find_custom_sections(analysis["sections"])
+
+        return analysis
+
+    def _extract_sections(self, content: str) -> List[Dict]:
+        """Extract section headers and their content from markdown."""
+        sections = []
+        lines = content.split("\n")
+
+        current_section = None
+        current_level = 0
+        section_start = 0
+
+        for i, line in enumerate(lines):
+            if line.startswith("#"):
+                # Save previous section if exists
+                if current_section:
+                    sections.append({
+                        "title": current_section,
+                        "level": current_level,
+                        "start_line": section_start,
+                        "end_line": i - 1,
+                        "content_preview": self._get_content_preview(
+                            lines[section_start:i]
+                        ),
+                    })
+
+                # Parse new section
+                level = len(line.split()[0])
+                title = line.lstrip("#").strip()
+                current_section = title
+                current_level = level
+                section_start = i
+
+        # Add last section
+        if current_section:
+            sections.append({
+                "title": current_section,
+                "level": current_level,
+                "start_line": section_start,
+                "end_line": len(lines) - 1,
+                "content_preview": self._get_content_preview(lines[section_start:]),
+            })
+
+        return sections
+
+    def _get_content_preview(self, lines: List[str], max_length: int = 100) -> str:
+        """Get a preview of section content."""
+        content = " ".join(line.strip() for line in lines[1:6] if line.strip())
+        if len(content) > max_length:
+            content = content[:max_length] + "..."
+        return content
+
+    def _check_outdated_patterns(self) -> List[str]:
+        """Check for outdated documentation patterns."""
+        patterns = []
+
+        if self.existing_content:
+            # Check for old patterns
+            if "## Installation" in self.existing_content and "pip install" not in self.existing_content:
+                patterns.append("Missing installation instructions")
+
+            if "TODO" in self.existing_content or "FIXME" in self.existing_content:
+                patterns.append("Contains TODO/FIXME items")
+
+            if not re.search(r"Last Updated:|Last Modified:", self.existing_content, re.IGNORECASE):
+                patterns.append("Missing update timestamp")
+
+            if "```" not in self.existing_content:
+                patterns.append("No code examples")
+
+        return patterns
+
+    def _find_custom_sections(self, sections: List[Dict]) -> List[str]:
+        """Find sections that don't match standard template."""
+        standard_patterns = [
+            r"priority.?index",
+            r"project.?overview",
+            r"critical",
+            r"important",
+            r"standard",
+            r"optional",
+            r"architecture",
+            r"workflow",
+            r"development",
+            r"documentation",
+            r"meta",
+        ]
+
+        custom = []
+        for section in sections:
+            title_lower = section["title"].lower()
+            if not any(re.search(pattern, title_lower) for pattern in standard_patterns):
+                custom.append(section["title"])
+
+        return custom
+
+    def merge_with_template(self, new_content: str, preserve_custom: bool = True) -> str:
+        """Merge existing content with new template content."""
+        if not self.existing_content:
+            return new_content
+
+        logger.info("Merging existing CLAUDE.md with new content...")
+
+        # Parse both contents into sections
+        existing_sections = self._parse_into_sections(self.existing_content)
+        new_sections = self._parse_into_sections(new_content)
+
+        # Merge sections intelligently
+        merged = self._merge_sections(existing_sections, new_sections, preserve_custom)
+
+        # Reorganize by priority
+        merged = self._reorganize_by_priority(merged)
+
+        # Add metadata
+        merged = self._add_metadata(merged)
+
+        return merged
+
+    def _parse_into_sections(self, content: str) -> Dict[str, str]:
+        """Parse markdown content into a dictionary of sections."""
+        sections = {}
+        current_section = None
+        current_content = []
+
+        for line in content.split("\n"):
+            if line.startswith("#"):
+                # Save previous section
+                if current_section:
+                    sections[current_section] = "\n".join(current_content)
+
+                # Start new section
+                current_section = line
+                current_content = []
+            else:
+                current_content.append(line)
+
+        # Save last section
+        if current_section:
+            sections[current_section] = "\n".join(current_content)
+
+        return sections
+
+    def _merge_sections(
+        self, existing: Dict[str, str], new: Dict[str, str], preserve_custom: bool
+    ) -> Dict[str, str]:
+        """Merge existing and new sections intelligently."""
+        merged = {}
+
+        # Start with new sections as base
+        merged.update(new)
+
+        # Preserve custom sections from existing
+        if preserve_custom:
+            for section_header, content in existing.items():
+                section_key = self._get_section_key(section_header)
+
+                # If it's a custom section, preserve it
+                if section_key not in self.SECTION_PRIORITY:
+                    merged[section_header] = content
+                    logger.info(f"Preserving custom section: {section_header}")
+
+                # If section exists in both, merge content
+                elif section_header in new:
+                    merged_content = self._merge_section_content(
+                        content, new[section_header], section_header
+                    )
+                    merged[section_header] = merged_content
+
+        return merged
+
+    def _get_section_key(self, header: str) -> str:
+        """Extract section key from header for priority mapping."""
+        title = header.lstrip("#").strip().lower()
+
+        # Map to known section types
+        if "priority" in title and "index" in title:
+            return "priority_index"
+        elif "critical" in title and "security" in title:
+            return "critical_security"
+        elif "critical" in title and "business" in title:
+            return "critical_business"
+        elif "important" in title and "architecture" in title:
+            return "important_architecture"
+        elif "important" in title and "workflow" in title:
+            return "important_workflow"
+        elif "project" in title and "overview" in title:
+            return "project_overview"
+        elif "standard" in title and "coding" in title:
+            return "standard_coding"
+        elif "standard" in title and "tasks" in title:
+            return "standard_tasks"
+        elif "documentation" in title:
+            return "documentation_links"
+        elif "optional" in title or "future" in title:
+            return "optional_future"
+        elif "meta" in title or "maintain" in title:
+            return "meta_maintenance"
+        else:
+            return "unknown"
+
+    def _merge_section_content(
+        self, existing: str, new: str, section_header: str
+    ) -> str:
+        """Merge content from existing and new sections."""
+        # For critical sections, prefer new content but append unique existing items
+        if "critical" in section_header.lower() or "important" in section_header.lower():
+            # Extract bullet points from both
+            existing_items = self._extract_bullet_points(existing)
+            new_items = self._extract_bullet_points(new)
+
+            # Combine unique items
+            all_items = new_items.copy()
+            for item in existing_items:
+                if not self._is_duplicate_item(item, new_items):
+                    all_items.append(f"{item} [preserved]")
+
+            # Reconstruct section
+            if all_items:
+                return "\n".join([""] + all_items + [""])
+            else:
+                return new
+        else:
+            # For other sections, use new as base and append existing
+            if existing.strip() and existing.strip() != new.strip():
+                return f"{new}\n\n<!-- Preserved from previous version -->\n{existing}"
+            return new
+
+    def _extract_bullet_points(self, content: str) -> List[str]:
+        """Extract bullet points from content."""
+        points = []
+        for line in content.split("\n"):
+            if line.strip().startswith(("-", "*", "•", "+")):
+                points.append(line.strip())
+        return points
+
+    def _is_duplicate_item(self, item: str, items: List[str]) -> bool:
+        """Check if item is duplicate of any in list."""
+        item_clean = re.sub(r"[^a-zA-Z0-9\s]", "", item.lower())
+        for existing in items:
+            existing_clean = re.sub(r"[^a-zA-Z0-9\s]", "", existing.lower())
+            # Use fuzzy matching for similarity
+            similarity = difflib.SequenceMatcher(None, item_clean, existing_clean).ratio()
+            if similarity > 0.8:  # 80% similarity threshold
+                return True
+        return False
+
+    def _reorganize_by_priority(self, sections: Dict[str, str]) -> str:
+        """Reorganize sections by priority order."""
+        # Sort sections by priority
+        sorted_sections = sorted(
+            sections.items(),
+            key=lambda x: self.SECTION_PRIORITY.get(
+                self._get_section_key(x[0]), 50  # Default priority
+            ),
+            reverse=True,  # Higher priority first
+        )
+
+        # Reconstruct document
+        result = []
+        for header, content in sorted_sections:
+            result.append(header)
+            result.append(content)
+
+        return "\n".join(result)
+
+    def _add_metadata(self, content: str) -> str:
+        """Add metadata to the document."""
+        timestamp = datetime.now().isoformat()
+
+        # Check if meta section exists
+        if "## 📝 Meta:" not in content and "## Meta:" not in content:
+            meta_section = f"""
+
+## 📝 Meta: Maintaining This Document
+
+- **Last Updated**: {timestamp}
+- **Update Method**: Claude MPM /mpm-init (intelligent merge)
+- **Version Control**: Previous versions archived in `docs/_archive/`
+- **Update Frequency**: Update when project requirements change significantly
+- **Priority Guidelines**:
+  - 🔴 CRITICAL: Security, data handling, breaking changes
+  - 🟡 IMPORTANT: Key workflows, architecture decisions
+  - 🟢 STANDARD: Common operations, best practices
+  - ⚪ OPTIONAL: Nice-to-have features, future ideas
+"""
+            content += meta_section
+        else:
+            # Update timestamp in existing meta section
+            content = re.sub(
+                r"Last Updated[:\s]*[\d\-T:\.]+",
+                f"Last Updated: {timestamp}",
+                content,
+                flags=re.IGNORECASE,
+            )
+
+        return content
+
+    def generate_update_report(self, old_content: str, new_content: str) -> Dict:
+        """Generate a report of changes between old and new content."""
+        report = {
+            "timestamp": datetime.now().isoformat(),
+            "changes": [],
+            "additions": [],
+            "deletions": [],
+            "statistics": {},
+        }
+
+        # Get diff
+        old_lines = old_content.splitlines()
+        new_lines = new_content.splitlines()
+        diff = difflib.unified_diff(old_lines, new_lines, lineterm="")
+
+        # Analyze changes
+        for line in diff:
+            if line.startswith("+") and not line.startswith("+++"):
+                report["additions"].append(line[1:].strip())
+            elif line.startswith("-") and not line.startswith("---"):
+                report["deletions"].append(line[1:].strip())
+
+        # Statistics
+        report["statistics"] = {
+            "old_lines": len(old_lines),
+            "new_lines": len(new_lines),
+            "lines_added": len(report["additions"]),
+            "lines_removed": len(report["deletions"]),
+            "net_change": len(new_lines) - len(old_lines),
+        }
+
+        # Identify major changes
+        old_sections = set(self._extract_section_titles(old_content))
+        new_sections = set(self._extract_section_titles(new_content))
+
+        report["sections_added"] = list(new_sections - old_sections)
+        report["sections_removed"] = list(old_sections - new_sections)
+
+        return report
+
+    def _extract_section_titles(self, content: str) -> List[str]:
+        """Extract section titles from content."""
+        titles = []
+        for line in content.splitlines():
+            if line.startswith("#"):
+                titles.append(line.lstrip("#").strip())
+        return titles
+
+    def validate_content(self, content: str) -> Tuple[bool, List[str]]:
+        """Validate CLAUDE.md content for completeness and correctness."""
+        issues = []
+
+        # Check for required sections
+        required_sections = [
+            "Priority Index",
+            "Project Overview",
+            "CRITICAL",
+            "IMPORTANT",
+        ]
+
+        for section in required_sections:
+            if section not in content:
+                issues.append(f"Missing required section: {section}")
+
+        # Check for priority markers
+        has_markers = any(marker in content for marker in self.PRIORITY_MARKERS.values())
+        if not has_markers:
+            issues.append("No priority markers found (🔴🟡🟢⚪)")
+
+        # Check for single-path documentation
+        if "one way" not in content.lower() and "single path" not in content.lower():
+            issues.append("Missing single-path workflow documentation")
+
+        # Check for examples
+        if "```" not in content:
+            issues.append("No code examples found")
+
+        # Check length
+        if len(content) < 1000:
+            issues.append("Documentation seems too brief (< 1000 characters)")
+
+        return len(issues) == 0, issues
+
+    def create_minimal_template(self) -> str:
+        """Create a minimal CLAUDE.md template."""
+        project_name = self.project_path.name
+        return f"""# {project_name} - CLAUDE.md
+
+## 🎯 Priority Index
+
+### 🔴 CRITICAL Instructions
+- [Add critical security and data handling rules here]
+
+### 🟡 IMPORTANT Instructions
+- [Add key architectural decisions and workflows here]
+
+## 📋 Project Overview
+
+[Brief description of the project's purpose and goals]
+
+## 🔴 CRITICAL: Security & Data Handling
+
+[Critical security requirements and data handling rules]
+
+## 🟡 IMPORTANT: Development Workflow
+
+### ONE Way to Build
+```bash
+# Add build command
+```
+
+### ONE Way to Test
+```bash
+# Add test command
+```
+
+### ONE Way to Deploy
+```bash
+# Add deploy command
+```
+
+## 🟢 STANDARD: Coding Guidelines
+
+[Standard development practices and conventions]
+
+## 📚 Documentation Links
+
+- [Link to additional documentation]
+
+## 📝 Meta: Maintaining This Document
+
+- **Last Updated**: {datetime.now().isoformat()}
+- **Created By**: Claude MPM /mpm-init
+- **Update Frequency**: As needed when requirements change
+"""
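
For readers who want to try the new service outside of the `/mpm-init` command, the sketch below exercises the public methods added in this file. It is not taken from the package itself; the import path and the call order are assumptions based on the file list above and the class definition in this diff.

```python
# Hypothetical driver for the DocumentationManager service added in 4.3.22.
# Assumes the class is importable from the path shown in the file list; the
# packaged mpm-init command wires these calls together differently.
from pathlib import Path

from claude_mpm.services.project.documentation_manager import DocumentationManager

manager = DocumentationManager(Path.cwd())

# Start from the minimal template, or merge it into an existing CLAUDE.md.
template = manager.create_minimal_template()
if manager.has_existing_documentation():
    print(manager.analyze_existing_content())        # structure report for the current file
    updated = manager.merge_with_template(template)   # preserves custom sections by default
else:
    updated = template

# Validate the result and summarize what would change before writing anything.
ok, issues = manager.validate_content(updated)
if not ok:
    print("Validation issues:", issues)

report = manager.generate_update_report(manager.existing_content or "", updated)
print(report["statistics"])
```

Note that `DocumentationManager` only builds, merges, and validates content; it reads `CLAUDE.md` but never writes it, so persisting the merged result (and archiving the previous version) is left to the caller.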