claude-mpm 4.1.2__py3-none-any.whl → 4.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/templates/engineer.json +33 -11
- claude_mpm/cli/commands/agents.py +556 -1009
- claude_mpm/cli/commands/memory.py +248 -927
- claude_mpm/cli/commands/run.py +139 -484
- claude_mpm/cli/startup_logging.py +76 -0
- claude_mpm/core/agent_registry.py +6 -10
- claude_mpm/core/framework_loader.py +114 -595
- claude_mpm/core/logging_config.py +2 -4
- claude_mpm/hooks/claude_hooks/event_handlers.py +7 -117
- claude_mpm/hooks/claude_hooks/hook_handler.py +91 -755
- claude_mpm/hooks/claude_hooks/hook_handler_original.py +1040 -0
- claude_mpm/hooks/claude_hooks/hook_handler_refactored.py +347 -0
- claude_mpm/hooks/claude_hooks/services/__init__.py +13 -0
- claude_mpm/hooks/claude_hooks/services/connection_manager.py +190 -0
- claude_mpm/hooks/claude_hooks/services/duplicate_detector.py +106 -0
- claude_mpm/hooks/claude_hooks/services/state_manager.py +282 -0
- claude_mpm/hooks/claude_hooks/services/subagent_processor.py +374 -0
- claude_mpm/services/agents/deployment/agent_deployment.py +42 -454
- claude_mpm/services/agents/deployment/base_agent_locator.py +132 -0
- claude_mpm/services/agents/deployment/deployment_results_manager.py +185 -0
- claude_mpm/services/agents/deployment/single_agent_deployer.py +315 -0
- claude_mpm/services/agents/memory/agent_memory_manager.py +42 -508
- claude_mpm/services/agents/memory/memory_categorization_service.py +165 -0
- claude_mpm/services/agents/memory/memory_file_service.py +103 -0
- claude_mpm/services/agents/memory/memory_format_service.py +201 -0
- claude_mpm/services/agents/memory/memory_limits_service.py +99 -0
- claude_mpm/services/agents/registry/__init__.py +1 -1
- claude_mpm/services/cli/__init__.py +18 -0
- claude_mpm/services/cli/agent_cleanup_service.py +407 -0
- claude_mpm/services/cli/agent_dependency_service.py +395 -0
- claude_mpm/services/cli/agent_listing_service.py +463 -0
- claude_mpm/services/cli/agent_output_formatter.py +605 -0
- claude_mpm/services/cli/agent_validation_service.py +589 -0
- claude_mpm/services/cli/dashboard_launcher.py +424 -0
- claude_mpm/services/cli/memory_crud_service.py +617 -0
- claude_mpm/services/cli/memory_output_formatter.py +604 -0
- claude_mpm/services/cli/session_manager.py +513 -0
- claude_mpm/services/cli/socketio_manager.py +498 -0
- claude_mpm/services/cli/startup_checker.py +370 -0
- claude_mpm/services/core/cache_manager.py +311 -0
- claude_mpm/services/core/memory_manager.py +637 -0
- claude_mpm/services/core/path_resolver.py +498 -0
- claude_mpm/services/core/service_container.py +520 -0
- claude_mpm/services/core/service_interfaces.py +436 -0
- claude_mpm/services/diagnostics/checks/agent_check.py +65 -19
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.3.dist-info}/METADATA +1 -1
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.3.dist-info}/RECORD +52 -22
- claude_mpm/cli/commands/run_config_checker.py +0 -159
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.3.dist-info}/WHEEL +0 -0
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.3.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.3.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.3.dist-info}/top_level.txt +0 -0
claude_mpm/hooks/claude_hooks/hook_handler_refactored.py
@@ -0,0 +1,347 @@
+#!/usr/bin/env python3
+"""Refactored Claude Code hook handler with modular service architecture.
+
+This handler uses a service-oriented architecture with:
+- StateManagerService: Manages state and delegation tracking
+- ConnectionManagerService: Handles SocketIO and EventBus connections
+- SubagentResponseProcessor: Processes complex subagent responses
+- DuplicateEventDetector: Detects and filters duplicate events
+
+WHY service-oriented approach:
+- Better separation of concerns and modularity
+- Easier testing and maintenance
+- Reduced file size from 1040 to ~400 lines
+- Clear service boundaries and responsibilities
+"""
+
+import json
+import os
+import select
+import signal
+import sys
+import threading
+from datetime import datetime
+
+# Import extracted modules with fallback for direct execution
+try:
+    # Try relative imports first (when imported as module)
+    from .event_handlers import EventHandlers
+    from .memory_integration import MemoryHookManager
+    from .response_tracking import ResponseTrackingManager
+    from .services import (
+        ConnectionManagerService,
+        DuplicateEventDetector,
+        StateManagerService,
+        SubagentResponseProcessor,
+    )
+except ImportError:
+    # Fall back to absolute imports (when run directly)
+    from pathlib import Path
+
+    # Add parent directory to path
+    sys.path.insert(0, str(Path(__file__).parent))
+
+    from event_handlers import EventHandlers
+    from memory_integration import MemoryHookManager
+    from response_tracking import ResponseTrackingManager
+    from services import (
+        ConnectionManagerService,
+        DuplicateEventDetector,
+        StateManagerService,
+        SubagentResponseProcessor,
+    )
+
+# Debug mode is enabled by default for better visibility into hook processing
+# Set CLAUDE_MPM_HOOK_DEBUG=false to disable debug output
+DEBUG = os.environ.get("CLAUDE_MPM_HOOK_DEBUG", "true").lower() != "false"
+
+# Global singleton handler instance
+_global_handler = None
+_handler_lock = threading.Lock()
+
+
+class ClaudeHookHandler:
+    """Refactored hook handler with service-oriented architecture.
+
+    WHY service-oriented approach:
+    - Modular design with clear service boundaries
+    - Each service handles a specific responsibility
+    - Easier to test, maintain, and extend
+    - Reduced complexity in main handler class
+    """
+
+    def __init__(self):
+        # Initialize services
+        self.state_manager = StateManagerService()
+        self.connection_manager = ConnectionManagerService()
+        self.duplicate_detector = DuplicateEventDetector()
+
+        # Initialize extracted managers
+        self.memory_hook_manager = MemoryHookManager()
+        self.response_tracking_manager = ResponseTrackingManager()
+        self.event_handlers = EventHandlers(self)
+
+        # Initialize subagent processor with dependencies
+        self.subagent_processor = SubagentResponseProcessor(
+            self.state_manager, self.response_tracking_manager, self.connection_manager
+        )
+
+    def handle(self):
+        """Process hook event with minimal overhead and timeout protection.
+
+        WHY this approach:
+        - Fast path processing for minimal latency (no blocking waits)
+        - Non-blocking Socket.IO connection and event emission
+        - Timeout protection prevents indefinite hangs
+        - Connection timeout prevents indefinite hangs
+        - Graceful degradation if Socket.IO unavailable
+        - Always continues regardless of event status
+        - Process exits after handling to prevent accumulation
+        """
+        _continue_sent = False  # Track if continue has been sent
+
+        def timeout_handler(signum, frame):
+            """Handle timeout by forcing exit."""
+            nonlocal _continue_sent
+            if DEBUG:
+                print(f"Hook handler timeout (pid: {os.getpid()})", file=sys.stderr)
+            if not _continue_sent:
+                self._continue_execution()
+                _continue_sent = True
+            sys.exit(0)
+
+        try:
+            # Set a 10-second timeout for the entire operation
+            signal.signal(signal.SIGALRM, timeout_handler)
+            signal.alarm(10)
+
+            # Read and parse event
+            event = self._read_hook_event()
+            if not event:
+                if not _continue_sent:
+                    self._continue_execution()
+                    _continue_sent = True
+                return
+
+            # Check for duplicate events (same event within 100ms)
+            if self.duplicate_detector.is_duplicate(event):
+                if DEBUG:
+                    print(
+                        f"[{datetime.now().isoformat()}] Skipping duplicate event: {event.get('hook_event_name', 'unknown')} (PID: {os.getpid()})",
+                        file=sys.stderr,
+                    )
+                # Still need to output continue for this invocation
+                if not _continue_sent:
+                    self._continue_execution()
+                    _continue_sent = True
+                return
+
+            # Debug: Log that we're processing an event
+            if DEBUG:
+                hook_type = event.get("hook_event_name", "unknown")
+                print(
+                    f"\n[{datetime.now().isoformat()}] Processing hook event: {hook_type} (PID: {os.getpid()})",
+                    file=sys.stderr,
+                )
+
+            # Perform periodic cleanup if needed
+            if self.state_manager.increment_events_processed():
+                self.state_manager.cleanup_old_entries()
+                if DEBUG:
+                    print(
+                        f"🧹 Performed cleanup after {self.state_manager.events_processed} events",
+                        file=sys.stderr,
+                    )
+
+            # Route event to appropriate handler
+            self._route_event(event)
+
+            # Always continue execution (only if not already sent)
+            if not _continue_sent:
+                self._continue_execution()
+                _continue_sent = True
+
+        except Exception:
+            # Fail fast and silent (only send continue if not already sent)
+            if not _continue_sent:
+                self._continue_execution()
+                _continue_sent = True
+        finally:
+            # Cancel the alarm
+            signal.alarm(0)
+
+    def _read_hook_event(self) -> dict:
+        """
+        Read and parse hook event from stdin with timeout.
+
+        WHY: Centralized event reading with error handling and timeout
+        ensures consistent parsing and validation while preventing
+        processes from hanging indefinitely on stdin.read().
+
+        Returns:
+            Parsed event dictionary or None if invalid/timeout
+        """
+        try:
+            # Check if data is available on stdin with 1 second timeout
+            if sys.stdin.isatty():
+                # Interactive terminal - no data expected
+                return None
+
+            ready, _, _ = select.select([sys.stdin], [], [], 1.0)
+            if not ready:
+                # No data available within timeout
+                if DEBUG:
+                    print("No hook event data received within timeout", file=sys.stderr)
+                return None
+
+            # Data is available, read it
+            event_data = sys.stdin.read()
+            if not event_data.strip():
+                # Empty or whitespace-only data
+                return None
+
+            return json.loads(event_data)
+        except (json.JSONDecodeError, ValueError) as e:
+            if DEBUG:
+                print(f"Failed to parse hook event: {e}", file=sys.stderr)
+            return None
+        except Exception as e:
+            if DEBUG:
+                print(f"Error reading hook event: {e}", file=sys.stderr)
+            return None
+
+    def _route_event(self, event: dict) -> None:
+        """
+        Route event to appropriate handler based on type.
+
+        WHY: Centralized routing reduces complexity and makes
+        it easier to add new event types.
+
+        Args:
+            event: Hook event dictionary
+        """
+        hook_type = event.get("hook_event_name", "unknown")
+
+        # Map event types to handlers
+        event_handlers = {
+            "UserPromptSubmit": self.event_handlers.handle_user_prompt_fast,
+            "PreToolUse": self.event_handlers.handle_pre_tool_fast,
+            "PostToolUse": self.event_handlers.handle_post_tool_fast,
+            "Notification": self.event_handlers.handle_notification_fast,
+            "Stop": self.event_handlers.handle_stop_fast,
+            "SubagentStop": self.handle_subagent_stop,
+            "AssistantResponse": self.event_handlers.handle_assistant_response,
+        }
+
+        # Call appropriate handler if exists
+        handler = event_handlers.get(hook_type)
+        if handler:
+            try:
+                handler(event)
+            except Exception as e:
+                if DEBUG:
+                    print(f"Error handling {hook_type}: {e}", file=sys.stderr)
+
+    def handle_subagent_stop(self, event: dict):
+        """Delegate subagent stop processing to the specialized processor."""
+        self.subagent_processor.process_subagent_stop(event)
+
+    def _continue_execution(self) -> None:
+        """
+        Send continue action to Claude.
+
+        WHY: Centralized response ensures consistent format
+        and makes it easier to add response modifications.
+        """
+        print(json.dumps({"action": "continue"}))
+
+    # Delegation methods for compatibility with event_handlers
+    def _track_delegation(self, session_id: str, agent_type: str, request_data=None):
+        """Track delegation through state manager."""
+        self.state_manager.track_delegation(session_id, agent_type, request_data)
+
+    def _get_delegation_agent_type(self, session_id: str) -> str:
+        """Get delegation agent type through state manager."""
+        return self.state_manager.get_delegation_agent_type(session_id)
+
+    def _get_git_branch(self, working_dir=None) -> str:
+        """Get git branch through state manager."""
+        return self.state_manager.get_git_branch(working_dir)
+
+    def _emit_socketio_event(self, namespace: str, event: str, data: dict):
+        """Emit event through connection manager."""
+        self.connection_manager.emit_event(namespace, event, data)
+
+    def __del__(self):
+        """Cleanup on handler destruction."""
+        # Clean up connection manager if it exists
+        if hasattr(self, "connection_manager") and self.connection_manager:
+            try:
+                self.connection_manager.cleanup()
+            except:
+                pass  # Ignore cleanup errors during destruction
+
+
+def main():
+    """Entry point with singleton pattern and proper cleanup."""
+    global _global_handler
+    _continue_printed = False  # Track if we've already printed continue
+
+    def cleanup_handler(signum=None, frame=None):
+        """Cleanup handler for signals and exit."""
+        nonlocal _continue_printed
+        if DEBUG:
+            print(
+                f"Hook handler cleanup (pid: {os.getpid()}, signal: {signum})",
+                file=sys.stderr,
+            )
+        # Only output continue if we haven't already (i.e., if interrupted by signal)
+        if signum is not None and not _continue_printed:
+            print(json.dumps({"action": "continue"}))
+            _continue_printed = True
+        sys.exit(0)
+
+    # Register cleanup handlers
+    signal.signal(signal.SIGTERM, cleanup_handler)
+    signal.signal(signal.SIGINT, cleanup_handler)
+    # Don't register atexit handler since we're handling exit properly in main
+
+    try:
+        # Use singleton pattern to prevent creating multiple instances
+        with _handler_lock:
+            if _global_handler is None:
+                _global_handler = ClaudeHookHandler()
+                if DEBUG:
+                    print(
+                        f"✅ Created new ClaudeHookHandler singleton (pid: {os.getpid()})",
+                        file=sys.stderr,
+                    )
+            elif DEBUG:
+                print(
+                    f"♻️ Reusing existing ClaudeHookHandler singleton (pid: {os.getpid()})",
+                    file=sys.stderr,
+                )
+
+        handler = _global_handler
+
+        # Mark that handle() will print continue
+        handler.handle()
+        _continue_printed = True  # Mark as printed since handle() always prints it
+
+        # handler.handle() already calls _continue_execution(), so we don't need to do it again
+        # Just exit cleanly
+        sys.exit(0)
+
+    except Exception as e:
+        # Only output continue if not already printed
+        if not _continue_printed:
+            print(json.dumps({"action": "continue"}))
+            _continue_printed = True
+        # Log error for debugging
+        if DEBUG:
+            print(f"Hook handler error: {e}", file=sys.stderr)
+        sys.exit(0)  # Exit cleanly even on error
+
+
+if __name__ == "__main__":
+    main()
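
For reference, a minimal sketch (not shipped in the package) of how this entry point is driven: Claude Code pipes one JSON hook event to the script's stdin and reads a single `{"action": "continue"}` line from stdout. The module invocation and event fields below are assumptions based on the file listing above.

```python
# Hypothetical test harness for the refactored hook handler (not part of claude-mpm).
# Assumes claude-mpm 4.1.3 is installed so the module can be run with -m.
import json
import subprocess
import sys

event = {
    "hook_event_name": "UserPromptSubmit",
    "session_id": "demo-session",
    "prompt": "Hello from a test harness",
}

proc = subprocess.run(
    [sys.executable, "-m", "claude_mpm.hooks.claude_hooks.hook_handler_refactored"],
    input=json.dumps(event),   # delivered on stdin, parsed by _read_hook_event()
    capture_output=True,
    text=True,
    timeout=15,                # the handler aborts itself after a 10-second SIGALRM
)
print(proc.stdout.strip())     # expected: {"action": "continue"}
```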
claude_mpm/hooks/claude_hooks/services/__init__.py
@@ -0,0 +1,13 @@
+"""Hook handler services for modular functionality."""
+
+from .connection_manager import ConnectionManagerService
+from .duplicate_detector import DuplicateEventDetector
+from .state_manager import StateManagerService
+from .subagent_processor import SubagentResponseProcessor
+
+__all__ = [
+    "ConnectionManagerService",
+    "DuplicateEventDetector",
+    "StateManagerService",
+    "SubagentResponseProcessor",
+]
claude_mpm/hooks/claude_hooks/services/connection_manager.py
@@ -0,0 +1,190 @@
+"""Connection management service for Claude hook handler.
+
+This service manages:
+- SocketIO connection pool initialization
+- EventBus initialization
+- Event emission through both channels
+- Connection cleanup
+"""
+
+import os
+import sys
+from datetime import datetime
+
+# Debug mode is enabled by default for better visibility into hook processing
+DEBUG = os.environ.get("CLAUDE_MPM_HOOK_DEBUG", "true").lower() != "false"
+
+# Import extracted modules with fallback for direct execution
+try:
+    # Try relative imports first (when imported as module)
+    # Use the modern SocketIOConnectionPool instead of the deprecated local one
+    from claude_mpm.core.socketio_pool import get_connection_pool
+except ImportError:
+    # Fall back to absolute imports (when run directly)
+    from pathlib import Path
+
+    # Add parent directory to path
+    sys.path.insert(0, str(Path(__file__).parent))
+
+    # Try to import get_connection_pool from deprecated location
+    try:
+        from connection_pool import SocketIOConnectionPool
+
+        def get_connection_pool():
+            return SocketIOConnectionPool()
+
+    except ImportError:
+        get_connection_pool = None
+
+# Import EventNormalizer for consistent event formatting
+try:
+    from claude_mpm.services.socketio.event_normalizer import EventNormalizer
+except ImportError:
+    # Create a simple fallback EventNormalizer if import fails
+    class EventNormalizer:
+        def normalize(self, event_data, source="hook"):
+            """Simple fallback normalizer that returns event as-is."""
+            return type(
+                "NormalizedEvent",
+                (),
+                {
+                    "to_dict": lambda: {
+                        "event": "claude_event",
+                        "type": event_data.get("type", "unknown"),
+                        "subtype": event_data.get("subtype", "generic"),
+                        "timestamp": event_data.get(
+                            "timestamp", datetime.now().isoformat()
+                        ),
+                        "data": event_data.get("data", event_data),
+                    }
+                },
+            )
+
+
+# Import EventBus for decoupled event distribution
+try:
+    from claude_mpm.services.event_bus import EventBus
+
+    EVENTBUS_AVAILABLE = True
+except ImportError:
+    EVENTBUS_AVAILABLE = False
+    EventBus = None
+
+
+class ConnectionManagerService:
+    """Manages connections for the Claude hook handler."""
+
+    def __init__(self):
+        """Initialize connection management service."""
+        # Event normalizer for consistent event schema
+        self.event_normalizer = EventNormalizer()
+
+        # Initialize SocketIO connection pool for inter-process communication
+        # This sends events directly to the Socket.IO server in the daemon process
+        self.connection_pool = None
+        self._initialize_socketio_pool()
+
+        # Initialize EventBus for in-process event distribution (optional)
+        self.event_bus = None
+        self._initialize_eventbus()
+
+    def _initialize_socketio_pool(self):
+        """Initialize the SocketIO connection pool."""
+        try:
+            self.connection_pool = get_connection_pool()
+            if DEBUG:
+                print("✅ Modern SocketIO connection pool initialized", file=sys.stderr)
+        except Exception as e:
+            if DEBUG:
+                print(
+                    f"⚠️ Failed to initialize SocketIO connection pool: {e}",
+                    file=sys.stderr,
+                )
+            self.connection_pool = None
+
+    def _initialize_eventbus(self):
+        """Initialize the EventBus for in-process distribution."""
+        if EVENTBUS_AVAILABLE:
+            try:
+                self.event_bus = EventBus.get_instance()
+                if DEBUG:
+                    print("✅ EventBus initialized for hook handler", file=sys.stderr)
+            except Exception as e:
+                if DEBUG:
+                    print(f"⚠️ Failed to initialize EventBus: {e}", file=sys.stderr)
+                self.event_bus = None
+
+    def emit_event(self, namespace: str, event: str, data: dict):
+        """Emit event through both connection pool and EventBus.
+
+        WHY dual approach:
+        - Connection pool: Direct Socket.IO connection for inter-process communication
+        - EventBus: For in-process subscribers (if any)
+        - Ensures events reach the dashboard regardless of process boundaries
+        """
+        # Create event data for normalization
+        raw_event = {
+            "type": "hook",
+            "subtype": event,  # e.g., "user_prompt", "pre_tool", "subagent_stop"
+            "timestamp": datetime.now().isoformat(),
+            "data": data,
+            "source": "claude_hooks",  # Identify the source
+            "session_id": data.get("sessionId"),  # Include session if available
+        }
+
+        # Normalize the event using EventNormalizer for consistent schema
+        normalized_event = self.event_normalizer.normalize(raw_event, source="hook")
+        claude_event_data = normalized_event.to_dict()
+
+        # Log important events for debugging
+        if DEBUG and event in ["subagent_stop", "pre_tool"]:
+            if event == "subagent_stop":
+                agent_type = data.get("agent_type", "unknown")
+                print(
+                    f"Hook handler: Publishing SubagentStop for agent '{agent_type}'",
+                    file=sys.stderr,
+                )
+            elif event == "pre_tool" and data.get("tool_name") == "Task":
+                delegation = data.get("delegation_details", {})
+                agent_type = delegation.get("agent_type", "unknown")
+                print(
+                    f"Hook handler: Publishing Task delegation to agent '{agent_type}'",
+                    file=sys.stderr,
+                )
+
+        # First, try to emit through direct Socket.IO connection pool
+        # This is the primary path for inter-process communication
+        if self.connection_pool:
+            try:
+                # Emit to Socket.IO server directly
+                self.connection_pool.emit("claude_event", claude_event_data)
+                if DEBUG:
+                    print(f"✅ Emitted via connection pool: {event}", file=sys.stderr)
+            except Exception as e:
+                if DEBUG:
+                    print(f"⚠️ Failed to emit via connection pool: {e}", file=sys.stderr)
+
+        # Also publish to EventBus for any in-process subscribers
+        if self.event_bus and EVENTBUS_AVAILABLE:
+            try:
+                # Publish to EventBus with topic format: hook.{event}
+                topic = f"hook.{event}"
+                self.event_bus.publish(topic, claude_event_data)
+                if DEBUG:
+                    print(f"✅ Published to EventBus: {topic}", file=sys.stderr)
+            except Exception as e:
+                if DEBUG:
+                    print(f"⚠️ Failed to publish to EventBus: {e}", file=sys.stderr)
+
+        # Warn if neither method is available
+        if not self.connection_pool and not self.event_bus and DEBUG:
+            print(f"⚠️ No event emission method available for: {event}", file=sys.stderr)
+
+    def cleanup(self):
+        """Cleanup connections on service destruction."""
+        # Clean up connection pool if it exists
+        if self.connection_pool:
+            try:
+                self.connection_pool.cleanup()
+            except:
+                pass  # Ignore cleanup errors during destruction
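
A hedged usage sketch of `ConnectionManagerService` based on the hunk above: the payload is normalized and emitted through the Socket.IO connection pool and, when available, published to the EventBus under the topic `hook.<event>`. Field values below are illustrative; the `namespace` argument is accepted for compatibility but not used in the body shown.

```python
# Illustrative only (not part of the package); assumes claude-mpm 4.1.3 is importable.
# The service degrades silently if no Socket.IO server or EventBus is running.
from claude_mpm.hooks.claude_hooks.services import ConnectionManagerService

manager = ConnectionManagerService()
manager.emit_event(
    namespace="/hook",   # accepted but unused by emit_event() as shown above
    event="pre_tool",    # published to the EventBus as topic "hook.pre_tool"
    data={
        "tool_name": "Task",
        "sessionId": "demo-session",
        "delegation_details": {"agent_type": "engineer"},
    },
)
manager.cleanup()
```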
claude_mpm/hooks/claude_hooks/services/duplicate_detector.py
@@ -0,0 +1,106 @@
+"""Duplicate event detection service for Claude hook handler.
+
+This service manages:
+- Event key generation
+- Duplicate event detection within time windows
+- Recent event tracking
+"""
+
+import threading
+import time
+from collections import deque
+
+
+class DuplicateEventDetector:
+    """Detects and filters duplicate events."""
+
+    def __init__(
+        self, max_recent_events: int = 10, duplicate_window_seconds: float = 0.1
+    ):
+        """Initialize duplicate event detector.
+
+        Args:
+            max_recent_events: Maximum number of recent events to track
+            duplicate_window_seconds: Time window in seconds for duplicate detection
+        """
+        # Track recent events to detect duplicates
+        self._recent_events = deque(maxlen=max_recent_events)
+        self._events_lock = threading.Lock()
+        self.duplicate_window_seconds = duplicate_window_seconds
+
+    def is_duplicate(self, event: dict) -> bool:
+        """Check if an event is a duplicate of a recent event.
+
+        Args:
+            event: The event dictionary to check
+
+        Returns:
+            True if the event is a duplicate, False otherwise
+        """
+        event_key = self.generate_event_key(event)
+        current_time = time.time()
+
+        with self._events_lock:
+            # Check if we've seen this event recently
+            for recent_key, recent_time in self._recent_events:
+                if (
+                    recent_key == event_key
+                    and (current_time - recent_time) < self.duplicate_window_seconds
+                ):
+                    return True
+
+            # Not a duplicate, record it
+            self._recent_events.append((event_key, current_time))
+            return False
+
+    def generate_event_key(self, event: dict) -> str:
+        """Generate a unique key for an event to detect duplicates.
+
+        WHY: Claude Code may call the hook multiple times for the same event
+        because the hook is registered for multiple event types. We need to
+        detect and skip duplicate processing while still returning continue.
+
+        Args:
+            event: The event dictionary
+
+        Returns:
+            A unique string key for the event
+        """
+        # Create a key from event type, session_id, and key data
+        hook_type = event.get("hook_event_name", "unknown")
+        session_id = event.get("session_id", "")
+
+        # Add type-specific data to make the key unique
+        if hook_type == "PreToolUse":
+            tool_name = event.get("tool_name", "")
+            # For some tools, include parameters to distinguish calls
+            if tool_name == "Task":
+                tool_input = event.get("tool_input", {})
+                agent = tool_input.get("subagent_type", "")
+                prompt_preview = (
+                    tool_input.get("prompt", "") or tool_input.get("description", "")
+                )[:50]
+                return f"{hook_type}:{session_id}:{tool_name}:{agent}:{prompt_preview}"
+            return f"{hook_type}:{session_id}:{tool_name}"
+
+        if hook_type == "UserPromptSubmit":
+            prompt_preview = event.get("prompt", "")[:50]
+            return f"{hook_type}:{session_id}:{prompt_preview}"
+
+        # For other events, just use type and session
+        return f"{hook_type}:{session_id}"
+
+    def clear_old_events(self):
+        """Clear events older than the duplicate window."""
+        current_time = time.time()
+        cutoff_time = current_time - self.duplicate_window_seconds
+
+        with self._events_lock:
+            # Create a new deque with only recent events
+            recent_only = deque(
+                (key, timestamp)
+                for key, timestamp in self._recent_events
+                if timestamp > cutoff_time
+            )
+            recent_only.maxlen = self._recent_events.maxlen
+            self._recent_events = recent_only