claude-mpm 4.1.5__py3-none-any.whl → 4.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/templates/agent-manager.json +1 -1
- claude_mpm/agents/templates/agent-manager.md +111 -34
- claude_mpm/agents/templates/research.json +39 -13
- claude_mpm/cli/__init__.py +2 -0
- claude_mpm/cli/commands/__init__.py +2 -0
- claude_mpm/cli/commands/configure.py +1221 -0
- claude_mpm/cli/commands/configure_tui.py +1921 -0
- claude_mpm/cli/parsers/base_parser.py +7 -0
- claude_mpm/cli/parsers/configure_parser.py +119 -0
- claude_mpm/cli/startup_logging.py +39 -12
- claude_mpm/config/socketio_config.py +33 -4
- claude_mpm/constants.py +1 -0
- claude_mpm/core/socketio_pool.py +35 -3
- claude_mpm/dashboard/static/css/connection-status.css +370 -0
- claude_mpm/dashboard/static/js/components/connection-debug.js +654 -0
- claude_mpm/dashboard/static/js/connection-manager.js +536 -0
- claude_mpm/dashboard/static/js/socket-client.js +40 -16
- claude_mpm/dashboard/templates/index.html +11 -0
- claude_mpm/hooks/claude_hooks/services/__init__.py +3 -1
- claude_mpm/hooks/claude_hooks/services/connection_manager.py +17 -0
- claude_mpm/hooks/claude_hooks/services/connection_manager_http.py +190 -0
- claude_mpm/services/diagnostics/checks/__init__.py +2 -0
- claude_mpm/services/diagnostics/checks/instructions_check.py +418 -0
- claude_mpm/services/diagnostics/diagnostic_runner.py +15 -2
- claude_mpm/services/event_bus/direct_relay.py +230 -0
- claude_mpm/services/socketio/handlers/connection_handler.py +330 -0
- claude_mpm/services/socketio/server/broadcaster.py +32 -1
- claude_mpm/services/socketio/server/connection_manager.py +547 -0
- claude_mpm/services/socketio/server/core.py +78 -7
- claude_mpm/services/socketio/server/eventbus_integration.py +20 -9
- claude_mpm/services/socketio/server/main.py +74 -19
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.7.dist-info}/METADATA +3 -1
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.7.dist-info}/RECORD +38 -41
- claude_mpm/agents/OUTPUT_STYLE.md +0 -73
- claude_mpm/agents/backups/INSTRUCTIONS.md +0 -352
- claude_mpm/agents/templates/OPTIMIZATION_REPORT.md +0 -156
- claude_mpm/agents/templates/backup/data_engineer_agent_20250726_234551.json +0 -79
- claude_mpm/agents/templates/backup/documentation_agent_20250726_234551.json +0 -68
- claude_mpm/agents/templates/backup/engineer_agent_20250726_234551.json +0 -77
- claude_mpm/agents/templates/backup/ops_agent_20250726_234551.json +0 -78
- claude_mpm/agents/templates/backup/qa_agent_20250726_234551.json +0 -67
- claude_mpm/agents/templates/backup/research_agent_2025011_234551.json +0 -88
- claude_mpm/agents/templates/backup/research_agent_20250726_234551.json +0 -72
- claude_mpm/agents/templates/backup/research_memory_efficient.json +0 -88
- claude_mpm/agents/templates/backup/security_agent_20250726_234551.json +0 -78
- claude_mpm/agents/templates/backup/version_control_agent_20250726_234551.json +0 -62
- claude_mpm/agents/templates/vercel_ops_instructions.md +0 -582
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.7.dist-info}/WHEEL +0 -0
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.7.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.7.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.7.dist-info}/top_level.txt +0 -0
claude_mpm/hooks/claude_hooks/services/connection_manager.py

```diff
@@ -170,11 +170,28 @@ class ConnectionManagerService:
                 # Publish to EventBus with topic format: hook.{event}
                 topic = f"hook.{event}"
                 self.event_bus.publish(topic, claude_event_data)
+
+                # Enhanced verification logging
                 if DEBUG:
                     print(f"✅ Published to EventBus: {topic}", file=sys.stderr)
+                    # Get EventBus stats to verify publication
+                    if hasattr(self.event_bus, "get_stats"):
+                        stats = self.event_bus.get_stats()
+                        print(
+                            f"📊 EventBus stats after publish: {stats}", file=sys.stderr
+                        )
+                    # Log the number of data keys being published
+                    if isinstance(claude_event_data, dict):
+                        print(
+                            f"📦 Published data keys: {list(claude_event_data.keys())}",
+                            file=sys.stderr,
+                        )
             except Exception as e:
                 if DEBUG:
                     print(f"⚠️ Failed to publish to EventBus: {e}", file=sys.stderr)
+                    import traceback
+
+                    traceback.print_exc(file=sys.stderr)
 
         # Warn if neither method is available
         if not self.connection_pool and not self.event_bus and DEBUG:
```
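The added verification logging only relies on two EventBus capabilities, `publish(topic, data)` and an optional `get_stats()`, which is why the stats call is guarded with `hasattr`. The following is a minimal toy stand-in sketching that contract; it is not claude-mpm's actual EventBus, and the counter names are hypothetical.

```python
# Toy stand-in illustrating the interface the new logging expects.
# NOT claude-mpm's EventBus; names and counters here are hypothetical.
from collections import defaultdict
from typing import Any, Callable, Dict, List


class ToyEventBus:
    def __init__(self) -> None:
        self._subscribers: Dict[str, List[Callable[[Any], None]]] = defaultdict(list)
        self._published = 0

    def on(self, topic: str, handler: Callable[[Any], None]) -> None:
        self._subscribers[topic].append(handler)

    def publish(self, topic: str, data: Any) -> None:
        self._published += 1
        for handler in self._subscribers.get(topic, []):
            handler(data)

    def get_stats(self) -> Dict[str, int]:
        # The hook handler only prints whatever this returns, so any
        # dict-like summary satisfies the verification logging above.
        return {"published": self._published, "topics": len(self._subscribers)}


bus = ToyEventBus()
bus.on("hook.pre_tool", lambda event: print("received:", event["type"]))
bus.publish("hook.pre_tool", {"type": "hook", "subtype": "pre_tool"})
print(bus.get_stats())  # e.g. {'published': 1, 'topics': 1}
```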
claude_mpm/hooks/claude_hooks/services/connection_manager_http.py (new file, 190 lines)

```python
"""HTTP-based connection management service for Claude hook handler.

This service manages:
- HTTP POST event emission for ephemeral hook processes
- EventBus initialization (optional)
- Event emission through both channels

DESIGN DECISION: Use stateless HTTP POST instead of persistent SocketIO
connections because hook handlers are ephemeral processes (< 1 second lifetime).
This eliminates disconnection issues and matches the process lifecycle.
"""

import os
import sys
from datetime import datetime

# Debug mode is enabled by default for better visibility into hook processing
DEBUG = os.environ.get("CLAUDE_MPM_HOOK_DEBUG", "true").lower() != "false"

# Import requests for HTTP POST communication
try:
    import requests

    REQUESTS_AVAILABLE = True
except ImportError:
    REQUESTS_AVAILABLE = False
    requests = None

# Import EventNormalizer for consistent event formatting
try:
    from claude_mpm.services.socketio.event_normalizer import EventNormalizer
except ImportError:
    # Create a simple fallback EventNormalizer if import fails
    class EventNormalizer:
        def normalize(self, event_data, source="hook"):
            """Simple fallback normalizer that returns event as-is."""
            return type(
                "NormalizedEvent",
                (),
                {
                    "to_dict": lambda: {
                        "event": "claude_event",
                        "type": event_data.get("type", "unknown"),
                        "subtype": event_data.get("subtype", "generic"),
                        "timestamp": event_data.get(
                            "timestamp", datetime.now().isoformat()
                        ),
                        "data": event_data.get("data", event_data),
                    }
                },
            )


# Import EventBus for decoupled event distribution
try:
    from claude_mpm.services.event_bus import EventBus

    EVENTBUS_AVAILABLE = True
except ImportError:
    EVENTBUS_AVAILABLE = False
    EventBus = None


class ConnectionManagerService:
    """Manages connections for the Claude hook handler using HTTP POST."""

    def __init__(self):
        """Initialize connection management service."""
        # Event normalizer for consistent event schema
        self.event_normalizer = EventNormalizer()

        # Server configuration for HTTP POST
        self.server_host = os.environ.get("CLAUDE_MPM_SERVER_HOST", "localhost")
        self.server_port = int(os.environ.get("CLAUDE_MPM_SERVER_PORT", "8765"))
        self.http_endpoint = f"http://{self.server_host}:{self.server_port}/api/events"

        # Initialize EventBus for in-process event distribution (optional)
        self.event_bus = None
        self._initialize_eventbus()

        # For backward compatibility with tests
        self.connection_pool = None  # No longer used

        if DEBUG:
            print(
                f"✅ HTTP connection manager initialized - endpoint: {self.http_endpoint}",
                file=sys.stderr,
            )

    def _initialize_eventbus(self):
        """Initialize the EventBus for in-process distribution."""
        if EVENTBUS_AVAILABLE:
            try:
                self.event_bus = EventBus.get_instance()
                if DEBUG:
                    print("✅ EventBus initialized for hook handler", file=sys.stderr)
            except Exception as e:
                if DEBUG:
                    print(f"⚠️ Failed to initialize EventBus: {e}", file=sys.stderr)
                self.event_bus = None

    def emit_event(self, namespace: str, event: str, data: dict):
        """Emit event using HTTP POST and optionally EventBus.

        WHY HTTP POST approach:
        - Stateless: Perfect for ephemeral hook processes
        - Fire-and-forget: No connection management needed
        - Fast: Minimal overhead, no handshake
        - Reliable: Server handles buffering and retries
        """
        # Create event data for normalization
        raw_event = {
            "type": "hook",
            "subtype": event,  # e.g., "user_prompt", "pre_tool", "subagent_stop"
            "timestamp": datetime.now().isoformat(),
            "data": data,
            "source": "claude_hooks",  # Identify the source
            "session_id": data.get("sessionId"),  # Include session if available
        }

        # Normalize the event using EventNormalizer for consistent schema
        normalized_event = self.event_normalizer.normalize(raw_event, source="hook")
        claude_event_data = normalized_event.to_dict()

        # Log important events for debugging
        if DEBUG and event in ["subagent_stop", "pre_tool"]:
            if event == "subagent_stop":
                agent_type = data.get("agent_type", "unknown")
                print(
                    f"Hook handler: Publishing SubagentStop for agent '{agent_type}'",
                    file=sys.stderr,
                )
            elif event == "pre_tool" and data.get("tool_name") == "Task":
                delegation = data.get("delegation_details", {})
                agent_type = delegation.get("agent_type", "unknown")
                print(
                    f"Hook handler: Publishing Task delegation to agent '{agent_type}'",
                    file=sys.stderr,
                )

        # Primary method: HTTP POST to server
        # This is fire-and-forget with a short timeout
        if REQUESTS_AVAILABLE:
            try:
                # Send HTTP POST with short timeout (fire-and-forget pattern)
                response = requests.post(
                    self.http_endpoint,
                    json=claude_event_data,
                    timeout=0.5,  # 500ms timeout - don't wait long
                    headers={"Content-Type": "application/json"},
                )
                if DEBUG and response.status_code == 204:
                    print(f"✅ Emitted via HTTP POST: {event}", file=sys.stderr)
                elif DEBUG and response.status_code != 204:
                    print(
                        f"⚠️ HTTP POST returned status {response.status_code} for: {event}",
                        file=sys.stderr,
                    )
            except requests.exceptions.Timeout:
                # Timeout is expected for fire-and-forget pattern
                if DEBUG:
                    print(f"✅ HTTP POST sent (timeout OK): {event}", file=sys.stderr)
            except requests.exceptions.ConnectionError:
                # Server might not be running - this is OK
                if DEBUG:
                    print(f"⚠️ Server not available for: {event}", file=sys.stderr)
            except Exception as e:
                if DEBUG:
                    print(f"⚠️ Failed to emit via HTTP POST: {e}", file=sys.stderr)
        elif DEBUG:
            print(
                "⚠️ requests module not available - cannot emit via HTTP",
                file=sys.stderr,
            )

        # Also publish to EventBus for any in-process subscribers
        if self.event_bus and EVENTBUS_AVAILABLE:
            try:
                # Publish to EventBus with topic format: hook.{event}
                topic = f"hook.{event}"
                self.event_bus.publish(topic, claude_event_data)
                if DEBUG:
                    print(f"✅ Published to EventBus: {topic}", file=sys.stderr)
            except Exception as e:
                if DEBUG:
                    print(f"⚠️ Failed to publish to EventBus: {e}", file=sys.stderr)

    def cleanup(self):
        """Cleanup connections on service destruction."""
        # Nothing to cleanup for HTTP POST approach
```
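Taken on its own, the new service can be driven directly from Python. The snippet below is a hypothetical usage sketch: the environment variable names, the `/api/events` endpoint path, and the `emit_event` signature come from the file above, while the concrete event values are made up for illustration.

```python
# Hypothetical usage of the new HTTP-based connection manager.
# Only the env var names, endpoint, and emit_event signature come from the
# file above; the event payload values below are illustrative.
import os

os.environ.setdefault("CLAUDE_MPM_SERVER_HOST", "localhost")
os.environ.setdefault("CLAUDE_MPM_SERVER_PORT", "8765")  # POSTs to /api/events
os.environ["CLAUDE_MPM_HOOK_DEBUG"] = "true"             # keep stderr diagnostics on

from claude_mpm.hooks.claude_hooks.services.connection_manager_http import (
    ConnectionManagerService,
)

service = ConnectionManagerService()

# One call per hook event; the namespace argument is accepted but the
# HTTP path does not use it.
service.emit_event(
    namespace="/hook",
    event="pre_tool",
    data={
        "tool_name": "Task",
        "delegation_details": {"agent_type": "research"},
        "sessionId": "session-123",
    },
)

service.cleanup()  # no-op for the HTTP POST approach
```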
claude_mpm/services/diagnostics/checks/__init__.py

```diff
@@ -12,6 +12,7 @@ from .common_issues_check import CommonIssuesCheck
 from .configuration_check import ConfigurationCheck
 from .filesystem_check import FilesystemCheck
 from .installation_check import InstallationCheck
+from .instructions_check import InstructionsCheck
 from .mcp_check import MCPCheck
 from .monitor_check import MonitorCheck
 from .startup_log_check import StartupLogCheck
@@ -24,6 +25,7 @@ __all__ = [
     "ConfigurationCheck",
     "FilesystemCheck",
     "InstallationCheck",
+    "InstructionsCheck",
     "MCPCheck",
     "MonitorCheck",
     "StartupLogCheck",
```
claude_mpm/services/diagnostics/checks/instructions_check.py (new file, 418 lines)

```python
"""
Check for duplicate or conflicting CLAUDE.md and instruction files.

WHY: Detect duplicate content, conflicting directives, and improperly placed
instruction files that could cause confusion in agent behavior.
"""

import hashlib
import re
from collections import defaultdict
from pathlib import Path
from typing import Dict

from ..models import DiagnosticResult, DiagnosticStatus
from .base_check import BaseDiagnosticCheck


class InstructionsCheck(BaseDiagnosticCheck):
    """Check for duplicate, conflicting, or misplaced instruction files."""

    # Known instruction file patterns
    INSTRUCTION_FILES = {
        "CLAUDE.md": "Claude Code instructions (should be in project root only)",
        "INSTRUCTIONS.md": "MPM agent customization",
        "BASE_PM.md": "Base PM framework requirements",
    }

    # Patterns that indicate potential conflicts
    CONFLICT_PATTERNS = [
        (r"(?i)you\s+are\s+.*pm", "PM role definition"),
        (r"(?i)delegation\s+rules?", "Delegation rules"),
        (r"(?i)agent\s+selection", "Agent selection logic"),
        (r"(?i)framework\s+behavior", "Framework behavior"),
        (r"(?i)command\s+interception", "Command interception"),
        (r"(?i)memory\s+system", "Memory system configuration"),
        (r"(?i)response\s+format", "Response formatting"),
    ]

    @property
    def name(self) -> str:
        return "instructions_check"

    @property
    def category(self) -> str:
        return "Instructions"

    def run(self) -> DiagnosticResult:
        """Run instructions file diagnostics."""
        try:
            sub_results = []
            details = {}

            # Find all instruction files
            instruction_files = self._find_instruction_files()
            details["found_files"] = {
                str(path): file_type for path, file_type in instruction_files.items()
            }

            # Check for misplaced CLAUDE.md files
            claude_result = self._check_claude_md_placement(instruction_files)
            sub_results.append(claude_result)

            # Check for duplicate content
            duplicate_result = self._check_duplicates(instruction_files)
            sub_results.append(duplicate_result)

            # Check for conflicting directives
            conflict_result = self._check_conflicts(instruction_files)
            sub_results.append(conflict_result)

            # Check for overlapping agent definitions
            agent_result = self._check_agent_definitions(instruction_files)
            sub_results.append(agent_result)

            # Check proper separation of concerns
            separation_result = self._check_separation_of_concerns(instruction_files)
            sub_results.append(separation_result)

            # Determine overall status
            if any(r.status == DiagnosticStatus.ERROR for r in sub_results):
                status = DiagnosticStatus.ERROR
                message = "Found critical issues with instruction files"
            elif any(r.status == DiagnosticStatus.WARNING for r in sub_results):
                status = DiagnosticStatus.WARNING
                message = "Found minor issues with instruction files"
            else:
                status = DiagnosticStatus.OK
                message = "Instruction files are properly configured"

            return DiagnosticResult(
                category=self.category,
                status=status,
                message=message,
                details=details,
                sub_results=sub_results if self.verbose else [],
            )

        except Exception as e:
            return DiagnosticResult(
                category=self.category,
                status=DiagnosticStatus.ERROR,
                message=f"Instructions check failed: {e!s}",
                details={"error": str(e)},
            )

    def _find_instruction_files(self) -> Dict[Path, str]:
        """Find all instruction files in the project and user directories."""
        found_files = {}

        # Search locations
        search_paths = [
            Path.cwd(),  # Current project
            Path.home() / ".claude-mpm",  # User directory
            Path.home() / ".claude",  # Alternative user directory
        ]

        for base_path in search_paths:
            if not base_path.exists():
                continue

            for pattern, file_type in self.INSTRUCTION_FILES.items():
                # Use rglob for recursive search
                for file_path in base_path.rglob(pattern):
                    # Skip node_modules and virtual environments
                    if any(
                        part in file_path.parts
                        for part in [
                            "node_modules",
                            "venv",
                            ".venv",
                            "__pycache__",
                            ".git",
                        ]
                    ):
                        continue
                    found_files[file_path] = file_type

        return found_files

    def _check_claude_md_placement(self, files: Dict[Path, str]) -> DiagnosticResult:
        """Check that CLAUDE.md files are properly placed."""
        claude_files = [
            path for path, file_type in files.items() if path.name == "CLAUDE.md"
        ]

        if not claude_files:
            return DiagnosticResult(
                category="CLAUDE.md Placement",
                status=DiagnosticStatus.OK,
                message="No CLAUDE.md files found",
                details={},
            )

        issues = []
        project_root = Path.cwd()

        for path in claude_files:
            # CLAUDE.md should only be in project root
            if path.parent != project_root:
                rel_path = (
                    path.relative_to(project_root)
                    if project_root in path.parents or path.parent == project_root
                    else path
                )
                issues.append(
                    f"CLAUDE.md found in non-root location: {rel_path}\n"
                    f" → Should be in project root only for Claude Code"
                )

        if issues:
            return DiagnosticResult(
                category="CLAUDE.md Placement",
                status=DiagnosticStatus.WARNING,
                message=f"Found {len(issues)} misplaced CLAUDE.md file(s)",
                details={"issues": issues},
                fix_description=(
                    "CLAUDE.md should only exist in the project root directory. "
                    "Move or remove misplaced files."
                ),
            )

        return DiagnosticResult(
            category="CLAUDE.md Placement",
            status=DiagnosticStatus.OK,
            message="CLAUDE.md properly placed in project root",
            details={"count": len(claude_files)},
        )

    def _check_duplicates(self, files: Dict[Path, str]) -> DiagnosticResult:
        """Check for duplicate content between instruction files."""
        if len(files) < 2:
            return DiagnosticResult(
                category="Duplicate Content",
                status=DiagnosticStatus.OK,
                message="No duplicate content detected",
                details={},
            )

        # Calculate content hashes
        content_hashes = {}
        content_snippets = defaultdict(list)

        for path in files:
            try:
                content = path.read_text(encoding="utf-8")
                # Hash significant blocks (paragraphs)
                paragraphs = re.split(r"\n\s*\n", content)
                for para in paragraphs:
                    para = para.strip()
                    if len(para) > 50:  # Skip short snippets
                        hash_val = hashlib.md5(para.encode()).hexdigest()
                        content_snippets[hash_val].append((path, para[:100]))
            except Exception:
                continue

        # Find duplicates
        duplicates = []
        for hash_val, occurrences in content_snippets.items():
            if len(occurrences) > 1:
                files_str = ", ".join(str(path) for path, _ in occurrences)
                snippet = occurrences[0][1]
                duplicates.append(
                    f"Duplicate content found in: {files_str}\n"
                    f" Snippet: {snippet}..."
                )

        if duplicates:
            return DiagnosticResult(
                category="Duplicate Content",
                status=DiagnosticStatus.WARNING,
                message=f"Found {len(duplicates)} duplicate content block(s)",
                details={"duplicates": duplicates[:5]},  # Limit to first 5
                fix_description=(
                    "Remove duplicate content between files. "
                    "CLAUDE.md should contain Claude Code instructions, "
                    "INSTRUCTIONS.md should contain MPM-specific customization."
                ),
            )

        return DiagnosticResult(
            category="Duplicate Content",
            status=DiagnosticStatus.OK,
            message="No significant duplicate content found",
            details={},
        )

    def _check_conflicts(self, files: Dict[Path, str]) -> DiagnosticResult:
        """Check for conflicting directives between instruction files."""
        conflicts = []
        pattern_occurrences = defaultdict(list)

        for path in files:
            try:
                content = path.read_text(encoding="utf-8")
                for pattern, description in self.CONFLICT_PATTERNS:
                    matches = re.findall(pattern, content, re.MULTILINE)
                    if matches:
                        pattern_occurrences[description].append(
                            (path, len(matches), matches[0][:100])
                        )
            except Exception:
                continue

        # Find patterns that appear in multiple files
        for description, occurrences in pattern_occurrences.items():
            if len(occurrences) > 1:
                files_info = []
                for path, count, snippet in occurrences:
                    rel_path = (
                        path.relative_to(Path.cwd())
                        if Path.cwd() in path.parents or path.parent == Path.cwd()
                        else path
                    )
                    files_info.append(f"{rel_path} ({count} occurrence(s))")

                conflicts.append(
                    f"Potential conflict for '{description}':\n"
                    f" Found in: {', '.join(files_info)}"
                )

        if conflicts:
            return DiagnosticResult(
                category="Conflicting Directives",
                status=DiagnosticStatus.ERROR,
                message=f"Found {len(conflicts)} potential conflict(s)",
                details={"conflicts": conflicts},
                fix_description=(
                    "Review and consolidate conflicting directives. "
                    "PM role and behavior should be in INSTRUCTIONS.md, "
                    "Claude Code directives should be in CLAUDE.md."
                ),
            )

        return DiagnosticResult(
            category="Conflicting Directives",
            status=DiagnosticStatus.OK,
            message="No conflicting directives detected",
            details={},
        )

    def _check_agent_definitions(self, files: Dict[Path, str]) -> DiagnosticResult:
        """Check for overlapping or duplicate agent definitions."""
        agent_definitions = defaultdict(list)
        agent_pattern = r"(?:agent|Agent)\s+(\w+).*?(?:specializes?|expert|handles?)"

        for path in files:
            try:
                content = path.read_text(encoding="utf-8")
                matches = re.findall(agent_pattern, content, re.IGNORECASE)
                for agent_name in matches:
                    agent_definitions[agent_name.lower()].append(path)
            except Exception:
                continue

        # Find agents defined in multiple places
        duplicates = []
        for agent_name, paths in agent_definitions.items():
            if len(paths) > 1:
                files_str = ", ".join(
                    str(
                        path.relative_to(Path.cwd())
                        if Path.cwd() in path.parents or path.parent == Path.cwd()
                        else path
                    )
                    for path in paths
                )
                duplicates.append(
                    f"Agent '{agent_name}' defined in multiple files: {files_str}"
                )

        if duplicates:
            return DiagnosticResult(
                category="Agent Definitions",
                status=DiagnosticStatus.WARNING,
                message=f"Found {len(duplicates)} duplicate agent definition(s)",
                details={"duplicates": duplicates},
                fix_description=(
                    "Consolidate agent definitions in INSTRUCTIONS.md. "
                    "Each agent should be defined only once."
                ),
            )

        return DiagnosticResult(
            category="Agent Definitions",
            status=DiagnosticStatus.OK,
            message="Agent definitions are unique",
            details={"total_agents": len(agent_definitions)},
        )

    def _check_separation_of_concerns(self, files: Dict[Path, str]) -> DiagnosticResult:
        """Check that instruction files follow proper separation of concerns."""
        issues = []

        # Check for MPM-specific content in CLAUDE.md
        claude_files = [path for path in files if path.name == "CLAUDE.md"]
        for path in claude_files:
            try:
                content = path.read_text(encoding="utf-8")
                # Check for MPM-specific patterns
                mpm_patterns = [
                    r"(?i)multi-agent",
                    r"(?i)delegation",
                    r"(?i)agent\s+selection",
                    r"(?i)PM\s+role",
                ]
                for pattern in mpm_patterns:
                    if re.search(pattern, content):
                        issues.append(
                            f"CLAUDE.md contains MPM-specific content (pattern: {pattern})\n"
                            f" → Move to INSTRUCTIONS.md"
                        )
                        break
            except Exception:
                continue

        # Check for Claude Code specific content in INSTRUCTIONS.md
        instructions_files = [
            path for path in files if path.name == "INSTRUCTIONS.md"
        ]
        for path in instructions_files:
            try:
                content = path.read_text(encoding="utf-8")
                # Check for Claude Code specific patterns
                claude_patterns = [
                    r"(?i)claude\s+code",
                    r"(?i)development\s+guidelines",
                    r"(?i)project\s+structure",
                ]
                for pattern in claude_patterns:
                    if re.search(pattern, content):
                        issues.append(
                            f"INSTRUCTIONS.md contains Claude Code content (pattern: {pattern})\n"
                            f" → Should focus on MPM customization only"
                        )
                        break
            except Exception:
                continue

        if issues:
            return DiagnosticResult(
                category="Separation of Concerns",
                status=DiagnosticStatus.WARNING,
                message=f"Found {len(issues)} separation of concerns issue(s)",
                details={"issues": issues},
                fix_description=(
                    "Maintain clear separation:\n"
                    "• CLAUDE.md: Claude Code development guidelines\n"
                    "• INSTRUCTIONS.md: MPM agent behavior and customization\n"
                    "• BASE_PM.md: Framework requirements (do not modify)"
                ),
            )

        return DiagnosticResult(
            category="Separation of Concerns",
            status=DiagnosticStatus.OK,
            message="Instruction files properly separated",
            details={},
        )
```
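The duplicate-content check reduces to hashing normalized paragraphs and flagging any hash that shows up in more than one file. A simplified, self-contained sketch of that idea follows; the file names are hypothetical, and the real check additionally walks the project and user directories, skips vendored folders, and truncates the reported snippets.

```python
# Simplified sketch of the paragraph-hashing idea behind _check_duplicates.
# File names here are hypothetical; the real check walks Path.cwd(),
# ~/.claude-mpm, and ~/.claude and skips node_modules, venvs, .git, etc.
import hashlib
import re
from collections import defaultdict
from pathlib import Path


def find_duplicate_paragraphs(paths):
    """Map md5(paragraph) -> set of files that contain that paragraph."""
    seen = defaultdict(set)
    for path in paths:
        p = Path(path)
        if not p.exists():
            continue
        for para in re.split(r"\n\s*\n", p.read_text(encoding="utf-8")):
            para = para.strip()
            if len(para) > 50:  # same "skip short snippets" rule as the check
                seen[hashlib.md5(para.encode()).hexdigest()].add(p)
    return {digest: files for digest, files in seen.items() if len(files) > 1}


if __name__ == "__main__":
    dupes = find_duplicate_paragraphs(["CLAUDE.md", "docs/CLAUDE.md", "INSTRUCTIONS.md"])
    for digest, files in dupes.items():
        print(f"{digest[:8]}… duplicated in: {', '.join(str(f) for f in files)}")
```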
claude_mpm/services/diagnostics/diagnostic_runner.py

```diff
@@ -18,6 +18,7 @@ from .checks import (
     ConfigurationCheck,
     FilesystemCheck,
     InstallationCheck,
+    InstructionsCheck,
     MCPCheck,
     MonitorCheck,
     StartupLogCheck,
@@ -48,6 +49,7 @@ class DiagnosticRunner:
         InstallationCheck,
         ConfigurationCheck,
         FilesystemCheck,
+        InstructionsCheck,  # Check instruction files early
         ClaudeDesktopCheck,
         AgentCheck,
         MCPCheck,
@@ -107,9 +109,20 @@ class DiagnosticRunner:
 
         # Group checks by dependency level
         # Level 1: No dependencies
-        level1 = [
+        level1 = [
+            InstallationCheck,
+            FilesystemCheck,
+            ConfigurationCheck,
+            InstructionsCheck,
+        ]
         # Level 2: May depend on level 1
-        level2 = [
+        level2 = [
+            ClaudeDesktopCheck,
+            AgentCheck,
+            MCPCheck,
+            MonitorCheck,
+            StartupLogCheck,
+        ]
         # Level 3: Depends on others
         level3 = [CommonIssuesCheck]
 
```
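The reshaped lists only regroup the checks; the runner code that consumes `level1`, `level2`, and `level3` is not part of this diff. As an assumption about how such grouping is typically consumed (finish one level before starting the next, so later checks can rely on earlier results), a minimal sketch could look like the following; it is not the actual DiagnosticRunner implementation, and the check constructor signature is assumed.

```python
# Illustrative sketch only: DiagnosticRunner's real scheduling code is not in
# this diff, and the check constructor signature (verbose=...) is an assumption.
from concurrent.futures import ThreadPoolExecutor


def run_in_levels(levels, verbose=False):
    """Run each level's checks concurrently, completing a level before
    starting the next so later levels can build on earlier results."""
    results = []
    with ThreadPoolExecutor(max_workers=4) as pool:
        for level in levels:
            checks = [check_cls(verbose=verbose) for check_cls in level]
            results.extend(pool.map(lambda check: check.run(), checks))
    return results


# With the lists from the hunk above:
# results = run_in_levels([level1, level2, level3])
```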