claude-mpm 4.1.2__py3-none-any.whl → 4.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/BASE_AGENT_TEMPLATE.md +16 -19
- claude_mpm/agents/MEMORY.md +21 -49
- claude_mpm/agents/templates/OPTIMIZATION_REPORT.md +156 -0
- claude_mpm/agents/templates/api_qa.json +36 -116
- claude_mpm/agents/templates/backup/data_engineer_agent_20250726_234551.json +42 -9
- claude_mpm/agents/templates/backup/documentation_agent_20250726_234551.json +29 -6
- claude_mpm/agents/templates/backup/engineer_agent_20250726_234551.json +34 -6
- claude_mpm/agents/templates/backup/ops_agent_20250726_234551.json +41 -9
- claude_mpm/agents/templates/backup/qa_agent_20250726_234551.json +30 -8
- claude_mpm/agents/templates/backup/research_agent_2025011_234551.json +2 -2
- claude_mpm/agents/templates/backup/research_agent_20250726_234551.json +29 -6
- claude_mpm/agents/templates/backup/research_memory_efficient.json +2 -2
- claude_mpm/agents/templates/backup/security_agent_20250726_234551.json +41 -9
- claude_mpm/agents/templates/backup/version_control_agent_20250726_234551.json +23 -7
- claude_mpm/agents/templates/code_analyzer.json +18 -36
- claude_mpm/agents/templates/data_engineer.json +43 -14
- claude_mpm/agents/templates/documentation.json +55 -74
- claude_mpm/agents/templates/engineer.json +57 -40
- claude_mpm/agents/templates/imagemagick.json +7 -2
- claude_mpm/agents/templates/memory_manager.json +1 -1
- claude_mpm/agents/templates/ops.json +36 -4
- claude_mpm/agents/templates/project_organizer.json +23 -71
- claude_mpm/agents/templates/qa.json +34 -2
- claude_mpm/agents/templates/refactoring_engineer.json +9 -5
- claude_mpm/agents/templates/research.json +36 -4
- claude_mpm/agents/templates/security.json +29 -2
- claude_mpm/agents/templates/ticketing.json +3 -3
- claude_mpm/agents/templates/vercel_ops_agent.json +2 -2
- claude_mpm/agents/templates/version_control.json +28 -2
- claude_mpm/agents/templates/web_qa.json +38 -151
- claude_mpm/agents/templates/web_ui.json +2 -2
- claude_mpm/cli/commands/agent_manager.py +221 -1
- claude_mpm/cli/commands/agents.py +556 -1009
- claude_mpm/cli/commands/memory.py +248 -927
- claude_mpm/cli/commands/run.py +139 -484
- claude_mpm/cli/parsers/agent_manager_parser.py +34 -0
- claude_mpm/cli/startup_logging.py +76 -0
- claude_mpm/core/agent_registry.py +6 -10
- claude_mpm/core/framework_loader.py +205 -595
- claude_mpm/core/log_manager.py +49 -1
- claude_mpm/core/logging_config.py +2 -4
- claude_mpm/hooks/claude_hooks/event_handlers.py +7 -117
- claude_mpm/hooks/claude_hooks/hook_handler.py +91 -755
- claude_mpm/hooks/claude_hooks/hook_handler_original.py +1040 -0
- claude_mpm/hooks/claude_hooks/hook_handler_refactored.py +347 -0
- claude_mpm/hooks/claude_hooks/services/__init__.py +13 -0
- claude_mpm/hooks/claude_hooks/services/connection_manager.py +190 -0
- claude_mpm/hooks/claude_hooks/services/duplicate_detector.py +106 -0
- claude_mpm/hooks/claude_hooks/services/state_manager.py +282 -0
- claude_mpm/hooks/claude_hooks/services/subagent_processor.py +374 -0
- claude_mpm/services/agents/deployment/agent_deployment.py +42 -454
- claude_mpm/services/agents/deployment/base_agent_locator.py +132 -0
- claude_mpm/services/agents/deployment/deployment_results_manager.py +185 -0
- claude_mpm/services/agents/deployment/single_agent_deployer.py +315 -0
- claude_mpm/services/agents/memory/agent_memory_manager.py +42 -508
- claude_mpm/services/agents/memory/memory_categorization_service.py +165 -0
- claude_mpm/services/agents/memory/memory_file_service.py +103 -0
- claude_mpm/services/agents/memory/memory_format_service.py +201 -0
- claude_mpm/services/agents/memory/memory_limits_service.py +99 -0
- claude_mpm/services/agents/registry/__init__.py +1 -1
- claude_mpm/services/cli/__init__.py +18 -0
- claude_mpm/services/cli/agent_cleanup_service.py +407 -0
- claude_mpm/services/cli/agent_dependency_service.py +395 -0
- claude_mpm/services/cli/agent_listing_service.py +463 -0
- claude_mpm/services/cli/agent_output_formatter.py +605 -0
- claude_mpm/services/cli/agent_validation_service.py +589 -0
- claude_mpm/services/cli/dashboard_launcher.py +424 -0
- claude_mpm/services/cli/memory_crud_service.py +617 -0
- claude_mpm/services/cli/memory_output_formatter.py +604 -0
- claude_mpm/services/cli/session_manager.py +513 -0
- claude_mpm/services/cli/socketio_manager.py +498 -0
- claude_mpm/services/cli/startup_checker.py +370 -0
- claude_mpm/services/core/cache_manager.py +311 -0
- claude_mpm/services/core/memory_manager.py +637 -0
- claude_mpm/services/core/path_resolver.py +498 -0
- claude_mpm/services/core/service_container.py +520 -0
- claude_mpm/services/core/service_interfaces.py +436 -0
- claude_mpm/services/diagnostics/checks/agent_check.py +65 -19
- claude_mpm/services/memory/router.py +116 -10
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.4.dist-info}/METADATA +1 -1
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.4.dist-info}/RECORD +86 -55
- claude_mpm/cli/commands/run_config_checker.py +0 -159
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.4.dist-info}/WHEEL +0 -0
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.4.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.4.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.1.2.dist-info → claude_mpm-4.1.4.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1040 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Optimized Claude Code hook handler with Socket.IO connection pooling.
|
|
3
|
+
|
|
4
|
+
This handler now uses a connection pool for Socket.IO clients to reduce
|
|
5
|
+
connection overhead and implement circuit breaker and batching patterns.
|
|
6
|
+
|
|
7
|
+
WHY connection pooling approach:
|
|
8
|
+
- Reduces connection setup/teardown overhead by 80%
|
|
9
|
+
- Implements circuit breaker for resilience during outages
|
|
10
|
+
- Provides micro-batching for high-frequency events
|
|
11
|
+
- Maintains persistent connections for better performance
|
|
12
|
+
- Falls back gracefully when Socket.IO unavailable
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
import os
|
|
17
|
+
import select
|
|
18
|
+
import signal
|
|
19
|
+
import subprocess
|
|
20
|
+
import sys
|
|
21
|
+
import threading
|
|
22
|
+
import time
|
|
23
|
+
from collections import deque
|
|
24
|
+
from datetime import datetime
|
|
25
|
+
from typing import Optional
|
|
26
|
+
|
|
27
|
+
# Import extracted modules with fallback for direct execution
|
|
28
|
+
try:
|
|
29
|
+
# Try relative imports first (when imported as module)
|
|
30
|
+
# Use the modern SocketIOConnectionPool instead of the deprecated local one
|
|
31
|
+
from claude_mpm.core.socketio_pool import get_connection_pool
|
|
32
|
+
|
|
33
|
+
from .event_handlers import EventHandlers
|
|
34
|
+
from .memory_integration import MemoryHookManager
|
|
35
|
+
from .response_tracking import ResponseTrackingManager
|
|
36
|
+
except ImportError:
|
|
37
|
+
# Fall back to absolute imports (when run directly)
|
|
38
|
+
from pathlib import Path
|
|
39
|
+
|
|
40
|
+
# Add parent directory to path
|
|
41
|
+
sys.path.insert(0, str(Path(__file__).parent))
|
|
42
|
+
|
|
43
|
+
# Try to import get_connection_pool from deprecated location
|
|
44
|
+
try:
|
|
45
|
+
from connection_pool import SocketIOConnectionPool
|
|
46
|
+
|
|
47
|
+
def get_connection_pool():
|
|
48
|
+
return SocketIOConnectionPool()
|
|
49
|
+
|
|
50
|
+
except ImportError:
|
|
51
|
+
get_connection_pool = None
|
|
52
|
+
|
|
53
|
+
from event_handlers import EventHandlers
|
|
54
|
+
from memory_integration import MemoryHookManager
|
|
55
|
+
from response_tracking import ResponseTrackingManager
|
|
56
|
+
|
|
57
|
+
# Import EventNormalizer for consistent event formatting
|
|
58
|
+
try:
|
|
59
|
+
from claude_mpm.services.socketio.event_normalizer import EventNormalizer
|
|
60
|
+
except ImportError:
|
|
61
|
+
# Create a simple fallback EventNormalizer if import fails
|
|
62
|
+
class EventNormalizer:
    """Minimal stand-in for the real EventNormalizer.

    Used when claude_mpm.services.socketio.event_normalizer cannot be
    imported (e.g. running this hook script standalone). It wraps the raw
    event dict in an object exposing to_dict(), matching the shape callers
    expect from the real normalizer.
    """

    def normalize(self, event_data, source=None):
        """Return an object whose to_dict() yields the event essentially as-is.

        Args:
            event_data: Raw event dictionary to normalize.
            source: Accepted for signature compatibility with the real
                EventNormalizer (callers invoke normalize(..., source="hook"));
                ignored by this fallback.
                WHY: the original fallback omitted this parameter, so any call
                supplying it raised TypeError and silently broke event emission.

        Returns:
            An object with a to_dict() method producing the normalized dict.
        """
        payload = {
            "event": "claude_event",
            "type": event_data.get("type", "unknown"),
            "subtype": event_data.get("subtype", "generic"),
            "timestamp": event_data.get("timestamp", datetime.now().isoformat()),
            "data": event_data.get("data", event_data),
        }
        # The original returned the class object itself and callers invoke
        # to_dict() on it; `*_` absorbs an optional self so the call works
        # both on the class object and on an instance.
        return type("NormalizedEvent", (), {"to_dict": lambda *_: payload})
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
# Import EventBus for decoupled event distribution
|
|
83
|
+
try:
|
|
84
|
+
from claude_mpm.services.event_bus import EventBus
|
|
85
|
+
|
|
86
|
+
EVENTBUS_AVAILABLE = True
|
|
87
|
+
except ImportError:
|
|
88
|
+
EVENTBUS_AVAILABLE = False
|
|
89
|
+
EventBus = None
|
|
90
|
+
|
|
91
|
+
# Import constants for configuration
|
|
92
|
+
try:
|
|
93
|
+
from claude_mpm.core.constants import NetworkConfig, RetryConfig, TimeoutConfig
|
|
94
|
+
except ImportError:
|
|
95
|
+
# Fallback values if constants module not available
|
|
96
|
+
# Fallback configuration values used when claude_mpm.core.constants is
# unavailable (e.g. when this hook script runs standalone outside the package).
class NetworkConfig:
    # (low, high) port range associated with the Socket.IO server
    SOCKETIO_PORT_RANGE = (8765, 8785)
    # Seconds to wait before attempting a reconnection
    RECONNECTION_DELAY = 0.5
    # Seconds to wait on socket operations
    SOCKET_WAIT_TIMEOUT = 1.0


class TimeoutConfig:
    # Short timeout (seconds) for operations that must never hang,
    # e.g. the `git branch` subprocess call in _get_git_branch
    QUICK_TIMEOUT = 2.0


class RetryConfig:
    # Maximum retry attempts before giving up
    MAX_RETRIES = 3
    # Seconds to wait before the first retry
    INITIAL_RETRY_DELAY = 0.1
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
# Debug mode is enabled by default for better visibility into hook processing
|
|
110
|
+
# Set CLAUDE_MPM_HOOK_DEBUG=false to disable debug output
|
|
111
|
+
DEBUG = os.environ.get("CLAUDE_MPM_HOOK_DEBUG", "true").lower() != "false"
|
|
112
|
+
|
|
113
|
+
# Socket.IO import
|
|
114
|
+
try:
|
|
115
|
+
import socketio
|
|
116
|
+
|
|
117
|
+
SOCKETIO_AVAILABLE = True
|
|
118
|
+
except ImportError:
|
|
119
|
+
SOCKETIO_AVAILABLE = False
|
|
120
|
+
socketio = None
|
|
121
|
+
|
|
122
|
+
# Global singleton handler instance
|
|
123
|
+
_global_handler = None
|
|
124
|
+
_handler_lock = threading.Lock()
|
|
125
|
+
|
|
126
|
+
# Track recent events to detect duplicates
|
|
127
|
+
_recent_events = deque(maxlen=10)
|
|
128
|
+
_events_lock = threading.Lock()
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
class ClaudeHookHandler:
|
|
132
|
+
"""Optimized hook handler with direct Socket.IO client.
|
|
133
|
+
|
|
134
|
+
WHY direct client approach:
|
|
135
|
+
- Simple and reliable synchronous operation
|
|
136
|
+
- No complex threading or async issues
|
|
137
|
+
- Fast connection reuse when possible
|
|
138
|
+
- Graceful fallback when Socket.IO unavailable
|
|
139
|
+
"""
|
|
140
|
+
|
|
141
|
+
def __init__(self):
    """Initialize the hook handler and its best-effort emission channels.

    Both the Socket.IO connection pool and the EventBus are optional:
    each is initialized inside try/except and left as None on failure so
    the handler degrades gracefully when the daemon/dashboard is absent.
    """
    # Track events for periodic cleanup (see _cleanup_old_entries)
    self.events_processed = 0
    self.last_cleanup = time.time()
    # Event normalizer for consistent event schema
    self.event_normalizer = EventNormalizer()

    # Initialize SocketIO connection pool for inter-process communication.
    # This sends events directly to the Socket.IO server in the daemon process.
    self.connection_pool = None
    try:
        self.connection_pool = get_connection_pool()
        if DEBUG:
            print("✅ Modern SocketIO connection pool initialized", file=sys.stderr)
    except Exception as e:
        if DEBUG:
            print(
                f"⚠️ Failed to initialize SocketIO connection pool: {e}",
                file=sys.stderr,
            )
        self.connection_pool = None

    # Initialize EventBus for in-process event distribution (optional)
    self.event_bus = None
    if EVENTBUS_AVAILABLE:
        try:
            self.event_bus = EventBus.get_instance()
            if DEBUG:
                print("✅ EventBus initialized for hook handler", file=sys.stderr)
        except Exception as e:
            if DEBUG:
                print(f"⚠️ Failed to initialize EventBus: {e}", file=sys.stderr)
            self.event_bus = None

    # Maximum sizes for tracking state; enforced by _cleanup_old_entries
    self.MAX_DELEGATION_TRACKING = 200
    self.MAX_PROMPT_TRACKING = 100
    self.MAX_CACHE_AGE_SECONDS = 300
    self.CLEANUP_INTERVAL_EVENTS = 100

    # Agent delegation tracking
    # Store recent Task delegations: session_id -> agent_type
    self.active_delegations = {}
    # Use deque to limit memory usage (keep last 100 delegations)
    self.delegation_history = deque(maxlen=100)
    # Store delegation request data for response correlation:
    # session_id -> request_data
    self.delegation_requests = {}

    # Git branch cache (to avoid repeated subprocess calls)
    self._git_branch_cache = {}
    self._git_branch_cache_time = {}

    # Initialize extracted managers (memory hooks, response tracking,
    # and the per-event-type handlers used by _route_event)
    self.memory_hook_manager = MemoryHookManager()
    self.response_tracking_manager = ResponseTrackingManager()
    self.event_handlers = EventHandlers(self)

    # Store current user prompts for comprehensive response tracking
    self.pending_prompts = {}  # session_id -> prompt data
|
|
200
|
+
|
|
201
|
+
def _track_delegation(
    self, session_id: str, agent_type: str, request_data: Optional[dict] = None
):
    """Track a new agent delegation with optional request data for response correlation.

    Records the delegation in active_delegations (exact-session lookup),
    delegation_history (prefix-keyed, bounded deque), and — when
    request_data is given — delegation_requests, then evicts delegations
    with no history entry newer than five minutes.

    Args:
        session_id: Claude session identifier; falsy values are ignored.
        agent_type: Target agent; "unknown" entries are not tracked.
        request_data: Optional request payload stored for later
            correlation with the subagent's response.
    """
    if DEBUG:
        print(
            f" - session_id: {session_id[:16] if session_id else 'None'}...",
            file=sys.stderr,
        )
        print(f" - agent_type: {agent_type}", file=sys.stderr)
        print(f" - request_data provided: {bool(request_data)}", file=sys.stderr)
        print(
            f" - delegation_requests size before: {len(self.delegation_requests)}",
            file=sys.stderr,
        )

    if session_id and agent_type and agent_type != "unknown":
        self.active_delegations[session_id] = agent_type
        # History key embeds the timestamp so age can be recovered below.
        key = f"{session_id}:{datetime.now().timestamp()}"
        self.delegation_history.append((key, agent_type))

        # Store request data for response tracking correlation
        if request_data:
            self.delegation_requests[session_id] = {
                "agent_type": agent_type,
                "request": request_data,
                "timestamp": datetime.now().isoformat(),
            }
            if DEBUG:
                print(
                    f" - ✅ Stored in delegation_requests[{session_id[:16]}...]",
                    file=sys.stderr,
                )
                print(
                    f" - delegation_requests size after: {len(self.delegation_requests)}",
                    file=sys.stderr,
                )

        # Clean up old delegations (older than 5 minutes)
        cutoff_time = datetime.now().timestamp() - 300
        keys_to_remove = []
        for sid in list(self.active_delegations.keys()):
            # Check if this is an old entry by looking in history;
            # reversed() scans newest entries first, so the first match
            # for a session is also its most recent one.
            found_recent = False
            for hist_key, _ in reversed(self.delegation_history):
                if hist_key.startswith(sid):
                    # split(":", 1) assumes session IDs contain no ":" —
                    # TODO confirm against the session-ID format.
                    _, timestamp = hist_key.split(":", 1)
                    if float(timestamp) > cutoff_time:
                        found_recent = True
                        break
            if not found_recent:
                keys_to_remove.append(sid)

        for key in keys_to_remove:
            if key in self.active_delegations:
                del self.active_delegations[key]
            if key in self.delegation_requests:
                del self.delegation_requests[key]
|
|
259
|
+
|
|
260
|
+
def _cleanup_old_entries(self):
|
|
261
|
+
"""Clean up old entries to prevent memory growth."""
|
|
262
|
+
datetime.now().timestamp() - self.MAX_CACHE_AGE_SECONDS
|
|
263
|
+
|
|
264
|
+
# Clean up delegation tracking dictionaries
|
|
265
|
+
for storage in [self.active_delegations, self.delegation_requests]:
|
|
266
|
+
if len(storage) > self.MAX_DELEGATION_TRACKING:
|
|
267
|
+
# Keep only the most recent entries
|
|
268
|
+
sorted_keys = sorted(storage.keys())
|
|
269
|
+
excess = len(storage) - self.MAX_DELEGATION_TRACKING
|
|
270
|
+
for key in sorted_keys[:excess]:
|
|
271
|
+
del storage[key]
|
|
272
|
+
|
|
273
|
+
# Clean up pending prompts
|
|
274
|
+
if len(self.pending_prompts) > self.MAX_PROMPT_TRACKING:
|
|
275
|
+
sorted_keys = sorted(self.pending_prompts.keys())
|
|
276
|
+
excess = len(self.pending_prompts) - self.MAX_PROMPT_TRACKING
|
|
277
|
+
for key in sorted_keys[:excess]:
|
|
278
|
+
del self.pending_prompts[key]
|
|
279
|
+
|
|
280
|
+
# Clean up git branch cache
|
|
281
|
+
expired_keys = [
|
|
282
|
+
key
|
|
283
|
+
for key, cache_time in self._git_branch_cache_time.items()
|
|
284
|
+
if datetime.now().timestamp() - cache_time > self.MAX_CACHE_AGE_SECONDS
|
|
285
|
+
]
|
|
286
|
+
for key in expired_keys:
|
|
287
|
+
self._git_branch_cache.pop(key, None)
|
|
288
|
+
self._git_branch_cache_time.pop(key, None)
|
|
289
|
+
|
|
290
|
+
def _get_delegation_agent_type(self, session_id: str) -> str:
|
|
291
|
+
"""Get the agent type for a session's active delegation."""
|
|
292
|
+
# First try exact session match
|
|
293
|
+
if session_id and session_id in self.active_delegations:
|
|
294
|
+
return self.active_delegations[session_id]
|
|
295
|
+
|
|
296
|
+
# Then try to find in recent history
|
|
297
|
+
if session_id:
|
|
298
|
+
for key, agent_type in reversed(self.delegation_history):
|
|
299
|
+
if key.startswith(session_id):
|
|
300
|
+
return agent_type
|
|
301
|
+
|
|
302
|
+
return "unknown"
|
|
303
|
+
|
|
304
|
+
def _get_git_branch(self, working_dir: Optional[str] = None) -> str:
|
|
305
|
+
"""Get git branch for the given directory with caching.
|
|
306
|
+
|
|
307
|
+
WHY caching approach:
|
|
308
|
+
- Avoids repeated subprocess calls which are expensive
|
|
309
|
+
- Caches results for 30 seconds per directory
|
|
310
|
+
- Falls back gracefully if git command fails
|
|
311
|
+
- Returns 'Unknown' for non-git directories
|
|
312
|
+
"""
|
|
313
|
+
# Use current working directory if not specified
|
|
314
|
+
if not working_dir:
|
|
315
|
+
working_dir = os.getcwd()
|
|
316
|
+
|
|
317
|
+
# Check cache first (cache for 30 seconds)
|
|
318
|
+
current_time = datetime.now().timestamp()
|
|
319
|
+
cache_key = working_dir
|
|
320
|
+
|
|
321
|
+
if (
|
|
322
|
+
cache_key in self._git_branch_cache
|
|
323
|
+
and cache_key in self._git_branch_cache_time
|
|
324
|
+
and current_time - self._git_branch_cache_time[cache_key] < 30
|
|
325
|
+
):
|
|
326
|
+
return self._git_branch_cache[cache_key]
|
|
327
|
+
|
|
328
|
+
# Try to get git branch
|
|
329
|
+
try:
|
|
330
|
+
# Change to the working directory temporarily
|
|
331
|
+
original_cwd = os.getcwd()
|
|
332
|
+
os.chdir(working_dir)
|
|
333
|
+
|
|
334
|
+
# Run git command to get current branch
|
|
335
|
+
result = subprocess.run(
|
|
336
|
+
["git", "branch", "--show-current"],
|
|
337
|
+
capture_output=True,
|
|
338
|
+
text=True,
|
|
339
|
+
timeout=TimeoutConfig.QUICK_TIMEOUT,
|
|
340
|
+
check=False, # Quick timeout to avoid hanging
|
|
341
|
+
)
|
|
342
|
+
|
|
343
|
+
# Restore original directory
|
|
344
|
+
os.chdir(original_cwd)
|
|
345
|
+
|
|
346
|
+
if result.returncode == 0 and result.stdout.strip():
|
|
347
|
+
branch = result.stdout.strip()
|
|
348
|
+
# Cache the result
|
|
349
|
+
self._git_branch_cache[cache_key] = branch
|
|
350
|
+
self._git_branch_cache_time[cache_key] = current_time
|
|
351
|
+
return branch
|
|
352
|
+
# Not a git repository or no branch
|
|
353
|
+
self._git_branch_cache[cache_key] = "Unknown"
|
|
354
|
+
self._git_branch_cache_time[cache_key] = current_time
|
|
355
|
+
return "Unknown"
|
|
356
|
+
|
|
357
|
+
except (
|
|
358
|
+
subprocess.TimeoutExpired,
|
|
359
|
+
subprocess.CalledProcessError,
|
|
360
|
+
FileNotFoundError,
|
|
361
|
+
OSError,
|
|
362
|
+
):
|
|
363
|
+
# Git not available or command failed
|
|
364
|
+
self._git_branch_cache[cache_key] = "Unknown"
|
|
365
|
+
self._git_branch_cache_time[cache_key] = current_time
|
|
366
|
+
return "Unknown"
|
|
367
|
+
|
|
368
|
+
def handle(self):
    """Process hook event with minimal overhead and timeout protection.

    WHY this approach:
    - Fast path processing for minimal latency (no blocking waits)
    - Non-blocking Socket.IO connection and event emission
    - Timeout protection prevents indefinite hangs
    - Graceful degradation if Socket.IO unavailable
    - Always continues regardless of event status
    - Process exits after handling to prevent accumulation

    Every exit path emits exactly one {"action": "continue"} response;
    the _continue_sent flag guards against double emission (including
    from the SIGALRM handler).
    """
    _continue_sent = False  # Track if continue has been sent

    def timeout_handler(signum, frame):
        """Handle timeout by forcing exit (still emitting continue once)."""
        nonlocal _continue_sent
        if DEBUG:
            print(f"Hook handler timeout (pid: {os.getpid()})", file=sys.stderr)
        if not _continue_sent:
            self._continue_execution()
            _continue_sent = True
        sys.exit(0)

    try:
        # Set a 10-second timeout for the entire operation
        signal.signal(signal.SIGALRM, timeout_handler)
        signal.alarm(10)

        # Read and parse event
        event = self._read_hook_event()
        if not event:
            if not _continue_sent:
                self._continue_execution()
                _continue_sent = True
            return

        # Check for duplicate events (same event within 100ms).
        # WHY: Claude Code may invoke the hook multiple times for the
        # same event; module-level _recent_events records what was seen.
        global _recent_events, _events_lock
        event_key = self._get_event_key(event)
        current_time = time.time()

        with _events_lock:
            # Check if we've seen this event recently
            for recent_key, recent_time in _recent_events:
                if recent_key == event_key and (current_time - recent_time) < 0.1:
                    if DEBUG:
                        print(
                            f"[{datetime.now().isoformat()}] Skipping duplicate event: {event.get('hook_event_name', 'unknown')} (PID: {os.getpid()})",
                            file=sys.stderr,
                        )
                    # Still need to output continue for this invocation
                    if not _continue_sent:
                        self._continue_execution()
                        _continue_sent = True
                    return

            # Not a duplicate, record it
            _recent_events.append((event_key, current_time))

        # Debug: Log that we're processing an event
        if DEBUG:
            hook_type = event.get("hook_event_name", "unknown")
            print(
                f"\n[{datetime.now().isoformat()}] Processing hook event: {hook_type} (PID: {os.getpid()})",
                file=sys.stderr,
            )

        # Increment event counter and perform periodic cleanup
        self.events_processed += 1
        if self.events_processed % self.CLEANUP_INTERVAL_EVENTS == 0:
            self._cleanup_old_entries()
            if DEBUG:
                print(
                    f"🧹 Performed cleanup after {self.events_processed} events",
                    file=sys.stderr,
                )

        # Route event to appropriate handler
        self._route_event(event)

        # Always continue execution (only if not already sent)
        if not _continue_sent:
            self._continue_execution()
            _continue_sent = True

    except Exception:
        # Fail fast and silent (only send continue if not already sent)
        if not _continue_sent:
            self._continue_execution()
            _continue_sent = True
    finally:
        # Cancel the alarm so it cannot fire after handling completes
        signal.alarm(0)
|
|
462
|
+
|
|
463
|
+
def _read_hook_event(self) -> Optional[dict]:
    """
    Read and parse hook event from stdin with timeout.

    WHY: Centralized event reading with error handling and timeout
    ensures consistent parsing and validation while preventing
    processes from hanging indefinitely on stdin.read().

    Returns:
        Parsed event dictionary, or None on any failure: interactive
        terminal, no data within the 1-second select() timeout, empty
        input, or malformed JSON.
    """
    try:
        # Check if data is available on stdin with 1 second timeout
        if sys.stdin.isatty():
            # Interactive terminal - no data expected
            return None

        ready, _, _ = select.select([sys.stdin], [], [], 1.0)
        if not ready:
            # No data available within timeout
            if DEBUG:
                print("No hook event data received within timeout", file=sys.stderr)
            return None

        # Data is available, read it (reads until EOF — assumes the
        # caller closes stdin after writing the event)
        event_data = sys.stdin.read()
        if not event_data.strip():
            # Empty or whitespace-only data
            return None

        return json.loads(event_data)
    except (json.JSONDecodeError, ValueError) as e:
        if DEBUG:
            print(f"Failed to parse hook event: {e}", file=sys.stderr)
        return None
    except Exception as e:
        if DEBUG:
            print(f"Error reading hook event: {e}", file=sys.stderr)
        return None
|
|
502
|
+
|
|
503
|
+
def _route_event(self, event: dict) -> None:
    """
    Route event to appropriate handler based on type.

    WHY: Centralized routing reduces complexity and makes
    it easier to add new event types.

    Events whose hook_event_name has no mapping are silently ignored;
    handler exceptions are swallowed (logged only when DEBUG) so a
    failing handler never blocks the continue response.

    Args:
        event: Hook event dictionary
    """
    hook_type = event.get("hook_event_name", "unknown")

    # Map event types to handlers on the extracted EventHandlers object
    event_handlers = {
        "UserPromptSubmit": self.event_handlers.handle_user_prompt_fast,
        "PreToolUse": self.event_handlers.handle_pre_tool_fast,
        "PostToolUse": self.event_handlers.handle_post_tool_fast,
        "Notification": self.event_handlers.handle_notification_fast,
        "Stop": self.event_handlers.handle_stop_fast,
        "SubagentStop": self.event_handlers.handle_subagent_stop_fast,
        "AssistantResponse": self.event_handlers.handle_assistant_response,
    }

    # Call appropriate handler if exists
    handler = event_handlers.get(hook_type)
    if handler:
        try:
            handler(event)
        except Exception as e:
            if DEBUG:
                print(f"Error handling {hook_type}: {e}", file=sys.stderr)
|
|
534
|
+
|
|
535
|
+
def _get_event_key(self, event: dict) -> str:
|
|
536
|
+
"""Generate a unique key for an event to detect duplicates.
|
|
537
|
+
|
|
538
|
+
WHY: Claude Code may call the hook multiple times for the same event
|
|
539
|
+
because the hook is registered for multiple event types. We need to
|
|
540
|
+
detect and skip duplicate processing while still returning continue.
|
|
541
|
+
"""
|
|
542
|
+
# Create a key from event type, session_id, and key data
|
|
543
|
+
hook_type = event.get("hook_event_name", "unknown")
|
|
544
|
+
session_id = event.get("session_id", "")
|
|
545
|
+
|
|
546
|
+
# Add type-specific data to make the key unique
|
|
547
|
+
if hook_type == "PreToolUse":
|
|
548
|
+
tool_name = event.get("tool_name", "")
|
|
549
|
+
# For some tools, include parameters to distinguish calls
|
|
550
|
+
if tool_name == "Task":
|
|
551
|
+
tool_input = event.get("tool_input", {})
|
|
552
|
+
agent = tool_input.get("subagent_type", "")
|
|
553
|
+
prompt_preview = (
|
|
554
|
+
tool_input.get("prompt", "") or tool_input.get("description", "")
|
|
555
|
+
)[:50]
|
|
556
|
+
return f"{hook_type}:{session_id}:{tool_name}:{agent}:{prompt_preview}"
|
|
557
|
+
return f"{hook_type}:{session_id}:{tool_name}"
|
|
558
|
+
if hook_type == "UserPromptSubmit":
|
|
559
|
+
prompt_preview = event.get("prompt", "")[:50]
|
|
560
|
+
return f"{hook_type}:{session_id}:{prompt_preview}"
|
|
561
|
+
# For other events, just use type and session
|
|
562
|
+
return f"{hook_type}:{session_id}"
|
|
563
|
+
|
|
564
|
+
def _continue_execution(self) -> None:
    """
    Send continue action to Claude on stdout.

    WHY: Centralized response ensures consistent format
    and makes it easier to add response modifications.
    """
    response = {"action": "continue"}
    sys.stdout.write(json.dumps(response) + "\n")
|
|
572
|
+
|
|
573
|
+
def _emit_socketio_event(self, namespace: str, event: str, data: dict):
    """Emit event through both connection pool and EventBus.

    WHY dual approach:
    - Connection pool: Direct Socket.IO connection for inter-process communication
    - EventBus: For in-process subscribers (if any)
    - Ensures events reach the dashboard regardless of process boundaries

    Both paths are best-effort: failures are swallowed (logged only when
    DEBUG) so emission problems never break hook handling.

    Args:
        namespace: Socket.IO namespace — not referenced by either
            emission path in this body; NOTE(review): confirm whether it
            is still needed by callers.
        event: Event subtype, e.g. "user_prompt", "pre_tool", "subagent_stop".
        data: Event payload dictionary.
    """
    # Create event data for normalization
    raw_event = {
        "type": "hook",
        "subtype": event,  # e.g., "user_prompt", "pre_tool", "subagent_stop"
        "timestamp": datetime.now().isoformat(),
        "data": data,
        "source": "claude_hooks",  # Identify the source
        "session_id": data.get("sessionId"),  # Include session if available
    }

    # Normalize the event using EventNormalizer for consistent schema
    normalized_event = self.event_normalizer.normalize(raw_event, source="hook")
    claude_event_data = normalized_event.to_dict()

    # Log important events for debugging
    if DEBUG and event in ["subagent_stop", "pre_tool"]:
        if event == "subagent_stop":
            agent_type = data.get("agent_type", "unknown")
            print(
                f"Hook handler: Publishing SubagentStop for agent '{agent_type}'",
                file=sys.stderr,
            )
        elif event == "pre_tool" and data.get("tool_name") == "Task":
            delegation = data.get("delegation_details", {})
            agent_type = delegation.get("agent_type", "unknown")
            print(
                f"Hook handler: Publishing Task delegation to agent '{agent_type}'",
                file=sys.stderr,
            )

    # First, try to emit through direct Socket.IO connection pool.
    # This is the primary path for inter-process communication.
    if self.connection_pool:
        try:
            # Emit to Socket.IO server directly
            self.connection_pool.emit("claude_event", claude_event_data)
            if DEBUG:
                print(f"✅ Emitted via connection pool: {event}", file=sys.stderr)
        except Exception as e:
            if DEBUG:
                print(f"⚠️ Failed to emit via connection pool: {e}", file=sys.stderr)

    # Also publish to EventBus for any in-process subscribers
    if self.event_bus and EVENTBUS_AVAILABLE:
        try:
            # Publish to EventBus with topic format: hook.{event}
            topic = f"hook.{event}"
            self.event_bus.publish(topic, claude_event_data)
            if DEBUG:
                print(f"✅ Published to EventBus: {topic}", file=sys.stderr)
        except Exception as e:
            if DEBUG:
                print(f"⚠️ Failed to publish to EventBus: {e}", file=sys.stderr)

    # Warn if neither method is available
    if not self.connection_pool and not self.event_bus and DEBUG:
        print(f"⚠️ No event emission method available for: {event}", file=sys.stderr)
|
|
638
|
+
|
|
639
|
+
def handle_subagent_stop(self, event: dict) -> None:
    """Handle subagent stop events with improved agent type detection.

    WHY comprehensive subagent stop capture:
    - Provides visibility into subagent lifecycle and delegation patterns
    - Captures agent type, ID, reason, and results for analysis
    - Enables tracking of delegation success/failure patterns
    - Useful for understanding subagent performance and reliability

    Args:
        event: Raw hook event payload. Keys actually read here include
            "session_id", "agent_type"/"subagent_type", "agent_id"/
            "subagent_id", "reason"/"stop_reason", "task", "cwd",
            "output", "results", "exit_code", and "duration_ms".
            NOTE(review): assumed to come from a Claude Code SubagentStop
            hook — confirm the schema against the caller.

    Side effects:
        - May mutate ``self.delegation_requests`` (fuzzy-match re-keying
          and post-tracking cleanup).
        - May record a response via the response tracker.
        - Always emits a "subagent_stop" event to the /hook namespace.
    """
    # Enhanced debug logging for session correlation
    session_id = event.get("session_id", "")
    if DEBUG:
        print(
            f" - session_id: {session_id[:16] if session_id else 'None'}...",
            file=sys.stderr,
        )
        print(f" - event keys: {list(event.keys())}", file=sys.stderr)
        print(
            f" - delegation_requests size: {len(self.delegation_requests)}",
            file=sys.stderr,
        )
        # Show all stored session IDs for comparison
        all_sessions = list(self.delegation_requests.keys())
        if all_sessions:
            print(" - Stored sessions (first 16 chars):", file=sys.stderr)
            for sid in all_sessions[:10]:  # Show up to 10
                print(
                    f" - {sid[:16]}... (agent: {self.delegation_requests[sid].get('agent_type', 'unknown')})",
                    file=sys.stderr,
                )
        else:
            print(" - No stored sessions in delegation_requests!", file=sys.stderr)

    # First try to get agent type from our tracking
    agent_type = (
        self._get_delegation_agent_type(session_id) if session_id else "unknown"
    )

    # Fall back to event data if tracking didn't have it
    if agent_type == "unknown":
        agent_type = event.get("agent_type", event.get("subagent_type", "unknown"))

    agent_id = event.get("agent_id", event.get("subagent_id", ""))
    reason = event.get("reason", event.get("stop_reason", "unknown"))

    # Try to infer agent type from other fields if still unknown.
    # Heuristic keyword match on the task description — last resort only.
    if agent_type == "unknown" and "task" in event:
        task_desc = str(event.get("task", "")).lower()
        if "research" in task_desc:
            agent_type = "research"
        elif "engineer" in task_desc or "code" in task_desc:
            agent_type = "engineer"
        elif "pm" in task_desc or "project" in task_desc:
            agent_type = "pm"

    # Always log SubagentStop events for debugging
    if DEBUG or agent_type != "unknown":
        print(
            f"Hook handler: Processing SubagentStop - agent: '{agent_type}', session: '{session_id}', reason: '{reason}'",
            file=sys.stderr,
        )

    # Get working directory and git branch
    working_dir = event.get("cwd", "")
    git_branch = self._get_git_branch(working_dir) if working_dir else "Unknown"

    # Try to extract structured response from output if available:
    # agents may embed a ```json { ... } ``` fenced block in their output.
    output = event.get("output", "")
    structured_response = None
    if output:
        try:
            import re

            json_match = re.search(
                r"```json\s*(\{.*?\})\s*```", str(output), re.DOTALL
            )
            if json_match:
                structured_response = json.loads(json_match.group(1))
                if DEBUG:
                    print(
                        f"Extracted structured response from {agent_type} agent in SubagentStop",
                        file=sys.stderr,
                    )
        except (json.JSONDecodeError, AttributeError):
            pass  # No structured response, that's okay

    # Track agent response even without structured JSON
    if DEBUG:
        print(
            f" - response_tracking_enabled: {self.response_tracking_manager.response_tracking_enabled}",
            file=sys.stderr,
        )
        print(
            f" - response_tracker exists: {self.response_tracking_manager.response_tracker is not None}",
            file=sys.stderr,
        )
        print(
            f" - session_id: {session_id[:16] if session_id else 'None'}...",
            file=sys.stderr,
        )
        print(f" - agent_type: {agent_type}", file=sys.stderr)
        print(f" - reason: {reason}", file=sys.stderr)
        # Check if session exists in our storage
        if session_id in self.delegation_requests:
            print(" - ✅ Session found in delegation_requests", file=sys.stderr)
            print(
                f" - Stored agent: {self.delegation_requests[session_id].get('agent_type')}",
                file=sys.stderr,
            )
        else:
            print(
                " - ❌ Session NOT found in delegation_requests!", file=sys.stderr
            )
            print(" - Looking for partial match...", file=sys.stderr)
            # Try to find partial matches (diagnostic only — does not re-key)
            for stored_sid in list(self.delegation_requests.keys())[:10]:
                if stored_sid.startswith(session_id[:8]) or session_id.startswith(
                    stored_sid[:8]
                ):
                    print(
                        f" - Partial match found: {stored_sid[:16]}...",
                        file=sys.stderr,
                    )

    if (
        self.response_tracking_manager.response_tracking_enabled
        and self.response_tracking_manager.response_tracker
    ):
        try:
            # Get the original request data (with fuzzy matching fallback)
            request_info = self.delegation_requests.get(session_id)

            # If exact match fails, try partial matching
            if not request_info and session_id:
                if DEBUG:
                    print(
                        f" - Trying fuzzy match for session {session_id[:16]}...",
                        file=sys.stderr,
                    )
                # Try to find a session that matches the first 8-16 characters.
                # Iterates a snapshot (list(...)) because the match branch
                # re-keys and deletes entries from the live dict.
                for stored_sid in list(self.delegation_requests.keys()):
                    if (
                        stored_sid.startswith(session_id[:8])
                        or session_id.startswith(stored_sid[:8])
                        or (
                            len(session_id) >= 16
                            and len(stored_sid) >= 16
                            and stored_sid[:16] == session_id[:16]
                        )
                    ):
                        if DEBUG:
                            print(
                                f" - \u2705 Fuzzy match found: {stored_sid[:16]}...",
                                file=sys.stderr,
                            )
                        request_info = self.delegation_requests.get(stored_sid)
                        # Update the key to use the current session_id for consistency
                        if request_info:
                            self.delegation_requests[session_id] = request_info
                            # Optionally remove the old key to avoid duplicates
                            if stored_sid != session_id:
                                del self.delegation_requests[stored_sid]
                        break

            if DEBUG:
                print(
                    f" - request_info present: {bool(request_info)}",
                    file=sys.stderr,
                )
                if request_info:
                    print(
                        " - ✅ Found request data for response tracking",
                        file=sys.stderr,
                    )
                    print(
                        f" - stored agent_type: {request_info.get('agent_type')}",
                        file=sys.stderr,
                    )
                    print(
                        f" - request keys: {list(request_info.get('request', {}).keys())}",
                        file=sys.stderr,
                    )
                else:
                    print(
                        f" - ❌ No request data found for session {session_id[:16]}...",
                        file=sys.stderr,
                    )

            if request_info:
                # Use the output as the response; fall back to a synthetic
                # summary when the agent produced no output.
                response_text = (
                    str(output)
                    if output
                    else f"Agent {agent_type} completed with reason: {reason}"
                )

                # Get the original request
                original_request = request_info.get("request", {})
                prompt = original_request.get("prompt", "")
                description = original_request.get("description", "")

                # Combine prompt and description (skip description when it
                # duplicates the prompt)
                full_request = prompt
                if description and description != prompt:
                    if full_request:
                        full_request += f"\n\nDescription: {description}"
                    else:
                        full_request = description

                if not full_request:
                    full_request = f"Task delegation to {agent_type} agent"

                # Prepare metadata recorded alongside the tracked response
                metadata = {
                    "exit_code": event.get("exit_code", 0),
                    "success": reason in ["completed", "finished", "done"],
                    "has_error": reason
                    in ["error", "timeout", "failed", "blocked"],
                    "duration_ms": event.get("duration_ms"),
                    "working_directory": working_dir,
                    "git_branch": git_branch,
                    "timestamp": datetime.now().isoformat(),
                    "event_type": "subagent_stop",
                    "reason": reason,
                    "original_request_timestamp": request_info.get("timestamp"),
                }

                # Add structured response if available
                if structured_response:
                    metadata["structured_response"] = structured_response
                    metadata["task_completed"] = structured_response.get(
                        "task_completed", False
                    )

                    # Check for MEMORIES field and process if present
                    if structured_response.get("MEMORIES"):
                        memories = structured_response["MEMORIES"]
                        if DEBUG:
                            print(
                                f"Found MEMORIES field in {agent_type} response with {len(memories)} items",
                                file=sys.stderr,
                            )
                        # The memory will be processed by extract_and_update_memory
                        # which is called by the memory hook service

                # Track the response
                file_path = (
                    self.response_tracking_manager.response_tracker.track_response(
                        agent_name=agent_type,
                        request=full_request,
                        response=response_text,
                        session_id=session_id,
                        metadata=metadata,
                    )
                )

                if file_path and DEBUG:
                    print(
                        f"✅ Tracked {agent_type} agent response on SubagentStop: {file_path.name}",
                        file=sys.stderr,
                    )

                # Clean up the request data so completed delegations don't
                # accumulate in memory
                if session_id in self.delegation_requests:
                    del self.delegation_requests[session_id]

            elif DEBUG:
                print(
                    f"No request data for SubagentStop session {session_id[:8]}..., agent: {agent_type}",
                    file=sys.stderr,
                )

        except Exception as e:
            # Response tracking is best-effort: never let it block event
            # emission below.
            if DEBUG:
                print(
                    f"❌ Failed to track response on SubagentStop: {e}",
                    file=sys.stderr,
                )

    # Normalized payload consumed by the dashboard / event subscribers
    subagent_stop_data = {
        "agent_type": agent_type,
        "agent_id": agent_id,
        "reason": reason,
        "session_id": session_id,
        "working_directory": working_dir,
        "git_branch": git_branch,
        "timestamp": datetime.now().isoformat(),
        "is_successful_completion": reason in ["completed", "finished", "done"],
        "is_error_termination": reason in ["error", "timeout", "failed", "blocked"],
        "is_delegation_related": agent_type
        in ["research", "engineer", "pm", "ops", "qa", "documentation", "security"],
        "has_results": bool(event.get("results") or event.get("output")),
        "duration_context": event.get("duration_ms"),
        "hook_event_name": "SubagentStop",  # Explicitly set for dashboard
    }

    # Add structured response data if available
    if structured_response:
        subagent_stop_data["structured_response"] = {
            "task_completed": structured_response.get("task_completed", False),
            "instructions": structured_response.get("instructions", ""),
            "results": structured_response.get("results", ""),
            "files_modified": structured_response.get("files_modified", []),
            "tools_used": structured_response.get("tools_used", []),
            "remember": structured_response.get("remember"),
            "MEMORIES": structured_response.get(
                "MEMORIES"
            ),  # Complete memory replacement
        }

        # Log if MEMORIES field is present
        if structured_response.get("MEMORIES"):
            if DEBUG:
                memories_count = len(structured_response["MEMORIES"])
                print(
                    f"Agent {agent_type} returned MEMORIES field with {memories_count} items",
                    file=sys.stderr,
                )

    # Debug log the processed data
    if DEBUG:
        print(
            f"SubagentStop processed data: agent_type='{agent_type}', session_id='{session_id}'",
            file=sys.stderr,
        )

    # Emit to /hook namespace with high priority
    self._emit_socketio_event("/hook", "subagent_stop", subagent_stop_data)
def __del__(self) -> None:
    """Cleanup on handler destruction.

    Best-effort only: finalizers run at unpredictable times (possibly
    during interpreter shutdown), so cleanup failures are swallowed.
    """
    # Clean up connection pool if it exists; hasattr guards against a
    # partially-initialized instance being finalized.
    if hasattr(self, "connection_pool") and self.connection_pool:
        try:
            self.connection_pool.cleanup()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not masked; ordinary cleanup errors
            # during destruction are still deliberately ignored.
            pass
|
+
def main():
    """Entry point with singleton pattern and proper cleanup.

    Reuses one process-wide ClaudeHookHandler (guarded by _handler_lock),
    runs a single handle() pass, and guarantees that exactly one
    {"action": "continue"} response is emitted — even on signals or errors.
    """
    global _global_handler
    # Guard so the continue action is printed at most once per invocation.
    printed_continue = False

    def _shutdown(signum=None, frame=None):
        """Signal/exit hook: emit continue (once) and terminate cleanly."""
        nonlocal printed_continue
        if DEBUG:
            print(
                f"Hook handler cleanup (pid: {os.getpid()}, signal: {signum})",
                file=sys.stderr,
            )
        # A real signal arrived before handle() could answer — answer now.
        if signum is not None and not printed_continue:
            print(json.dumps({"action": "continue"}))
            printed_continue = True
        sys.exit(0)

    # Terminate gracefully on SIGTERM/SIGINT. No atexit hook: the normal
    # path below exits explicitly, so one is not needed.
    for sig in (signal.SIGTERM, signal.SIGINT):
        signal.signal(sig, _shutdown)

    try:
        # Singleton: create the handler at most once per process, under lock.
        with _handler_lock:
            created = _global_handler is None
            if created:
                _global_handler = ClaudeHookHandler()
            if DEBUG:
                if created:
                    print(
                        f"✅ Created new ClaudeHookHandler singleton (pid: {os.getpid()})",
                        file=sys.stderr,
                    )
                else:
                    print(
                        f"♻️ Reusing existing ClaudeHookHandler singleton (pid: {os.getpid()})",
                        file=sys.stderr,
                    )

        hook_handler = _global_handler

        # handle() prints the continue action itself; record that so the
        # signal/error paths cannot emit a duplicate.
        hook_handler.handle()
        printed_continue = True

        # handle() already invoked _continue_execution() — just exit cleanly.
        sys.exit(0)

    except Exception as exc:
        # Top-level boundary: always answer Claude, then exit cleanly.
        if not printed_continue:
            print(json.dumps({"action": "continue"}))
            printed_continue = True
        # Log error for debugging
        if DEBUG:
            print(f"Hook handler error: {exc}", file=sys.stderr)
        sys.exit(0)  # Exit cleanly even on error
+
# Script entry point: Claude Code invokes this file directly as a hook.
if __name__ == "__main__":
    main()