claude-mpm 4.0.3__py3-none-any.whl → 4.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. claude_mpm/agents/templates/ticketing.json +1 -1
  2. claude_mpm/cli/commands/monitor.py +131 -9
  3. claude_mpm/cli/commands/tickets.py +61 -26
  4. claude_mpm/cli/parsers/monitor_parser.py +22 -2
  5. claude_mpm/dashboard/static/built/components/agent-inference.js +2 -0
  6. claude_mpm/dashboard/static/built/components/event-processor.js +2 -0
  7. claude_mpm/dashboard/static/built/components/event-viewer.js +2 -0
  8. claude_mpm/dashboard/static/built/components/export-manager.js +2 -0
  9. claude_mpm/dashboard/static/built/components/file-tool-tracker.js +2 -0
  10. claude_mpm/dashboard/static/built/components/hud-library-loader.js +2 -0
  11. claude_mpm/dashboard/static/built/components/hud-manager.js +2 -0
  12. claude_mpm/dashboard/static/built/components/hud-visualizer.js +2 -0
  13. claude_mpm/dashboard/static/built/components/module-viewer.js +2 -0
  14. claude_mpm/dashboard/static/built/components/session-manager.js +2 -0
  15. claude_mpm/dashboard/static/built/components/socket-manager.js +2 -0
  16. claude_mpm/dashboard/static/built/components/ui-state-manager.js +2 -0
  17. claude_mpm/dashboard/static/built/components/working-directory.js +2 -0
  18. claude_mpm/dashboard/static/built/dashboard.js +2 -0
  19. claude_mpm/dashboard/static/built/socket-client.js +2 -0
  20. claude_mpm/dashboard/static/dist/components/event-viewer.js +1 -1
  21. claude_mpm/dashboard/static/dist/components/file-tool-tracker.js +1 -1
  22. claude_mpm/dashboard/static/dist/socket-client.js +1 -1
  23. claude_mpm/dashboard/static/js/components/event-viewer.js +20 -3
  24. claude_mpm/dashboard/static/js/components/file-tool-tracker.js +5 -5
  25. claude_mpm/dashboard/static/js/socket-client.js +18 -4
  26. claude_mpm/services/port_manager.py +370 -18
  27. claude_mpm/services/socketio/handlers/connection.py +41 -19
  28. claude_mpm/services/socketio/handlers/hook.py +23 -8
  29. {claude_mpm-4.0.3.dist-info → claude_mpm-4.0.4.dist-info}/METADATA +64 -22
  30. {claude_mpm-4.0.3.dist-info → claude_mpm-4.0.4.dist-info}/RECORD +34 -19
  31. {claude_mpm-4.0.3.dist-info → claude_mpm-4.0.4.dist-info}/WHEEL +0 -0
  32. {claude_mpm-4.0.3.dist-info → claude_mpm-4.0.4.dist-info}/entry_points.txt +0 -0
  33. {claude_mpm-4.0.3.dist-info → claude_mpm-4.0.4.dist-info}/licenses/LICENSE +0 -0
  34. {claude_mpm-4.0.3.dist-info → claude_mpm-4.0.4.dist-info}/top_level.txt +0 -0
@@ -8,17 +8,28 @@ Ensures only one instance runs per port and provides fallback port selection.
8
8
 
9
9
  import json
10
10
  import os
11
+ import signal
11
12
  import socket
12
13
  import subprocess
13
14
  import time
14
15
  from pathlib import Path
15
- from typing import Dict, List, Optional, Tuple
16
+ from typing import Dict, List, Optional, Tuple, NamedTuple
16
17
 
17
18
  import psutil
18
19
 
19
20
  from ..core.logging_config import get_logger
20
21
 
21
22
 
23
+ class ProcessInfo(NamedTuple):
24
+ """Information about a process using a port."""
25
+ pid: int
26
+ name: str
27
+ cmdline: str
28
+ is_ours: bool
29
+ is_debug: bool
30
+ is_daemon: bool
31
+
32
+
22
33
  class PortManager:
23
34
  """Manages port allocation and instance detection for SocketIO servers."""
24
35
 
@@ -42,6 +53,254 @@ class PortManager:
42
53
  return True
43
54
  except OSError:
44
55
  return False
56
+
57
+ def get_process_on_port(self, port: int) -> Optional[ProcessInfo]:
58
+ """Get information about the process using a specific port.
59
+
60
+ WHY: We need to identify what process is using a port to make intelligent
61
+ decisions about whether we can reclaim it (our debug scripts) or must
62
+ avoid it (external processes or our daemons).
63
+
64
+ Returns:
65
+ ProcessInfo with details about the process, or None if port is free
66
+ """
67
+ try:
68
+ # First try using lsof as it's more reliable for port detection
69
+ try:
70
+ result = subprocess.run(
71
+ ['lsof', '-i', f':{port}', '-sTCP:LISTEN', '-t'],
72
+ capture_output=True,
73
+ text=True,
74
+ timeout=2
75
+ )
76
+ if result.returncode == 0 and result.stdout.strip():
77
+ # Get the PID from lsof output
78
+ pid = int(result.stdout.strip().split()[0])
79
+ try:
80
+ process = psutil.Process(pid)
81
+ cmdline = ' '.join(process.cmdline())
82
+
83
+ # Determine if this is our process and what type
84
+ is_ours = self._is_our_process(pid, cmdline)
85
+ is_debug = self._is_debug_process(cmdline) if is_ours else False
86
+ is_daemon = self._is_daemon_process(cmdline) if is_ours else False
87
+
88
+ return ProcessInfo(
89
+ pid=pid,
90
+ name=process.name(),
91
+ cmdline=cmdline,
92
+ is_ours=is_ours,
93
+ is_debug=is_debug,
94
+ is_daemon=is_daemon
95
+ )
96
+ except (psutil.NoSuchProcess, psutil.AccessDenied) as e:
97
+ # Process exists but we can't access it
98
+ return ProcessInfo(
99
+ pid=pid,
100
+ name="unknown",
101
+ cmdline="<permission denied>",
102
+ is_ours=False,
103
+ is_debug=False,
104
+ is_daemon=False
105
+ )
106
+ except (subprocess.TimeoutExpired, FileNotFoundError):
107
+ # lsof not available or timed out, fall back to psutil
108
+ pass
109
+
110
+ # Fallback to psutil method
111
+ for conn in psutil.net_connections(kind='inet'):
112
+ if conn.laddr.port == port and conn.status == 'LISTEN':
113
+ try:
114
+ process = psutil.Process(conn.pid)
115
+ cmdline = ' '.join(process.cmdline())
116
+
117
+ # Determine if this is our process and what type
118
+ is_ours = self._is_our_process(conn.pid, cmdline)
119
+ is_debug = self._is_debug_process(cmdline) if is_ours else False
120
+ is_daemon = self._is_daemon_process(cmdline) if is_ours else False
121
+
122
+ return ProcessInfo(
123
+ pid=conn.pid,
124
+ name=process.name(),
125
+ cmdline=cmdline,
126
+ is_ours=is_ours,
127
+ is_debug=is_debug,
128
+ is_daemon=is_daemon
129
+ )
130
+ except (psutil.NoSuchProcess, psutil.AccessDenied):
131
+ # Can't access process details, mark as unknown external
132
+ return ProcessInfo(
133
+ pid=conn.pid,
134
+ name="unknown",
135
+ cmdline="<permission denied>",
136
+ is_ours=False,
137
+ is_debug=False,
138
+ is_daemon=False
139
+ )
140
+ except psutil.AccessDenied:
141
+ # No permission to check network connections
142
+ # Try socket binding as last resort
143
+ if not self.is_port_available(port):
144
+ # Port is in use but we can't determine by what
145
+ return ProcessInfo(
146
+ pid=0,
147
+ name="unknown",
148
+ cmdline="<unable to determine>",
149
+ is_ours=False,
150
+ is_debug=False,
151
+ is_daemon=False
152
+ )
153
+ except Exception as e:
154
+ self.logger.debug(f"Error getting process on port {port}: {e}")
155
+
156
+ return None
157
+
158
+ def _is_our_process(self, pid: int, cmdline: str = None) -> bool:
159
+ """Check if a process belongs to claude-mpm.
160
+
161
+ WHY: We need to distinguish our processes from external ones to know
162
+ which ports we can potentially reclaim.
163
+ """
164
+ try:
165
+ if cmdline is None:
166
+ process = psutil.Process(pid)
167
+ cmdline = ' '.join(process.cmdline())
168
+
169
+ cmdline_lower = cmdline.lower()
170
+
171
+ # Check for claude-mpm related patterns
172
+ our_patterns = [
173
+ 'claude-mpm',
174
+ 'claude_mpm',
175
+ 'socketio_debug',
176
+ 'socketio_daemon',
177
+ 'socketio_server',
178
+ str(self.project_root).lower(), # Running from our project directory
179
+ 'scripts/test_', # Our test scripts
180
+ 'scripts/debug_', # Our debug scripts
181
+ 'scripts/demo_', # Our demo scripts
182
+ 'scripts/run_', # Our run scripts
183
+ 'scripts/validate_', # Our validation scripts
184
+ ]
185
+
186
+ return any(pattern in cmdline_lower for pattern in our_patterns)
187
+
188
+ except (psutil.NoSuchProcess, psutil.AccessDenied):
189
+ return False
190
+
191
+ def _is_debug_process(self, cmdline: str) -> bool:
192
+ """Check if a process is a debug/test script (safe to kill).
193
+
194
+ WHY: Debug and test scripts can be safely terminated to reclaim ports,
195
+ unlike production daemons which should be preserved.
196
+ """
197
+ cmdline_lower = cmdline.lower()
198
+
199
+ debug_patterns = [
200
+ 'socketio_debug.py',
201
+ 'run_socketio_debug.py',
202
+ 'test_',
203
+ 'debug_',
204
+ 'demo_',
205
+ 'validate_',
206
+ 'scripts/test',
207
+ 'scripts/debug',
208
+ 'scripts/demo',
209
+ 'scripts/validate',
210
+ ]
211
+
212
+ # Also check if NOT a daemon (daemons are not debug scripts)
213
+ is_not_daemon = 'daemon' not in cmdline_lower or 'debug' in cmdline_lower
214
+
215
+ return any(pattern in cmdline_lower for pattern in debug_patterns) and is_not_daemon
216
+
217
+ def _is_daemon_process(self, cmdline: str) -> bool:
218
+ """Check if a process is a daemon (should be preserved).
219
+
220
+ WHY: Daemon processes are production services that should not be
221
+ automatically killed. Users must explicitly stop them.
222
+ """
223
+ cmdline_lower = cmdline.lower()
224
+
225
+ daemon_patterns = [
226
+ 'socketio_daemon',
227
+ 'claude-mpm monitor',
228
+ 'daemon',
229
+ ]
230
+
231
+ # Exclude debug daemons
232
+ if 'debug' in cmdline_lower:
233
+ return False
234
+
235
+ return any(pattern in cmdline_lower for pattern in daemon_patterns)
236
+
237
+ def kill_process_on_port(self, port: int, force: bool = False) -> bool:
238
+ """Kill a process using a specific port if it's safe to do so.
239
+
240
+ WHY: Automatically reclaim ports from our debug scripts while preserving
241
+ daemons and avoiding external processes.
242
+
243
+ Args:
244
+ port: Port number to reclaim
245
+ force: If True, kill even daemon processes (requires explicit user action)
246
+
247
+ Returns:
248
+ True if process was killed or port is now free, False otherwise
249
+ """
250
+ process_info = self.get_process_on_port(port)
251
+
252
+ if not process_info:
253
+ self.logger.info(f"Port {port} is already free")
254
+ return True
255
+
256
+ if not process_info.is_ours:
257
+ self.logger.warning(
258
+ f"Port {port} is used by external process '{process_info.name}' "
259
+ f"(PID: {process_info.pid}). Cannot reclaim."
260
+ )
261
+ return False
262
+
263
+ if process_info.is_daemon and not force:
264
+ self.logger.warning(
265
+ f"Port {port} is used by our daemon process (PID: {process_info.pid}). "
266
+ f"Use --force flag or stop the daemon explicitly."
267
+ )
268
+ return False
269
+
270
+ if process_info.is_debug or force:
271
+ try:
272
+ self.logger.info(
273
+ f"Killing {'debug' if process_info.is_debug else 'daemon'} process "
274
+ f"{process_info.pid} on port {port}"
275
+ )
276
+
277
+ # Try graceful termination first
278
+ os.kill(process_info.pid, signal.SIGTERM)
279
+
280
+ # Wait up to 2 seconds for graceful shutdown
281
+ for _ in range(20):
282
+ time.sleep(0.1)
283
+ if not psutil.pid_exists(process_info.pid):
284
+ self.logger.info(f"Process {process_info.pid} terminated gracefully")
285
+ return True
286
+
287
+ # Force kill if still running
288
+ self.logger.warning(f"Process {process_info.pid} didn't terminate, forcing kill")
289
+ os.kill(process_info.pid, signal.SIGKILL)
290
+ time.sleep(0.5)
291
+
292
+ if not psutil.pid_exists(process_info.pid):
293
+ self.logger.info(f"Process {process_info.pid} force killed")
294
+ return True
295
+ else:
296
+ self.logger.error(f"Failed to kill process {process_info.pid}")
297
+ return False
298
+
299
+ except Exception as e:
300
+ self.logger.error(f"Error killing process {process_info.pid}: {e}")
301
+ return False
302
+
303
+ return False
45
304
 
46
305
  def is_claude_mpm_instance(self, port: int) -> Tuple[bool, Optional[Dict]]:
47
306
  """Check if a port is being used by a claude-mpm SocketIO instance."""
@@ -82,29 +341,61 @@ class PortManager:
82
341
  return False
83
342
 
84
343
  def find_available_port(
85
- self, preferred_port: Optional[int] = None
344
+ self, preferred_port: Optional[int] = None, reclaim: bool = True
86
345
  ) -> Optional[int]:
87
- """Find an available port, preferring the specified port if given."""
346
+ """Find an available port, preferring the specified port if given.
347
+
348
+ WHY: Enhanced to intelligently reclaim ports from our debug processes
349
+ while avoiding external processes and preserving daemons.
350
+
351
+ Args:
352
+ preferred_port: Port to try first
353
+ reclaim: If True, try to reclaim ports from our debug scripts
354
+
355
+ Returns:
356
+ Available port number or None if no ports available
357
+ """
88
358
  # Try preferred port first
89
359
  if preferred_port and preferred_port in self.PORT_RANGE:
90
360
  if self.is_port_available(preferred_port):
91
- is_ours, instance_info = self.is_claude_mpm_instance(preferred_port)
92
- if not is_ours:
93
- return preferred_port
94
- else:
95
- self.logger.warning(
96
- f"Port {preferred_port} is already used by claude-mpm instance: {instance_info}"
361
+ return preferred_port
362
+
363
+ # Port is in use - check if we can reclaim it
364
+ if reclaim:
365
+ process_info = self.get_process_on_port(preferred_port)
366
+ if process_info and process_info.is_ours and process_info.is_debug:
367
+ self.logger.info(
368
+ f"Port {preferred_port} used by our debug process, attempting to reclaim"
97
369
  )
370
+ if self.kill_process_on_port(preferred_port):
371
+ time.sleep(0.5) # Brief pause for port to be released
372
+ if self.is_port_available(preferred_port):
373
+ return preferred_port
374
+ elif process_info:
375
+ if process_info.is_daemon:
376
+ self.logger.warning(
377
+ f"Port {preferred_port} used by our daemon (PID: {process_info.pid})"
378
+ )
379
+ elif not process_info.is_ours:
380
+ self.logger.warning(
381
+ f"Port {preferred_port} used by external process '{process_info.name}'"
382
+ )
98
383
 
99
384
  # Try default port
100
385
  if self.is_port_available(self.DEFAULT_PORT):
101
- is_ours, instance_info = self.is_claude_mpm_instance(self.DEFAULT_PORT)
102
- if not is_ours:
103
- return self.DEFAULT_PORT
104
- else:
386
+ return self.DEFAULT_PORT
387
+
388
+ # Check if we can reclaim default port
389
+ if reclaim:
390
+ process_info = self.get_process_on_port(self.DEFAULT_PORT)
391
+ if process_info and process_info.is_ours and process_info.is_debug:
105
392
  self.logger.info(
106
- f"Default port {self.DEFAULT_PORT} is already used by claude-mpm instance"
393
+ f"Default port {self.DEFAULT_PORT} used by our debug process, attempting to reclaim"
107
394
  )
395
+ if self.kill_process_on_port(self.DEFAULT_PORT):
396
+ time.sleep(0.5)
397
+ if self.is_port_available(self.DEFAULT_PORT):
398
+ return self.DEFAULT_PORT
108
399
 
109
400
  # Try other ports in range
110
401
  for port in self.PORT_RANGE:
@@ -112,10 +403,20 @@ class PortManager:
112
403
  continue # Already tried
113
404
 
114
405
  if self.is_port_available(port):
115
- is_ours, instance_info = self.is_claude_mpm_instance(port)
116
- if not is_ours:
117
- self.logger.info(f"Selected available port: {port}")
118
- return port
406
+ return port
407
+
408
+ # Try to reclaim if it's our debug process
409
+ if reclaim:
410
+ process_info = self.get_process_on_port(port)
411
+ if process_info and process_info.is_ours and process_info.is_debug:
412
+ self.logger.info(
413
+ f"Port {port} used by our debug process, attempting to reclaim"
414
+ )
415
+ if self.kill_process_on_port(port):
416
+ time.sleep(0.5)
417
+ if self.is_port_available(port):
418
+ self.logger.info(f"Reclaimed port {port}")
419
+ return port
119
420
 
120
421
  self.logger.error(
121
422
  f"No available ports in range {self.PORT_RANGE.start}-{self.PORT_RANGE.stop-1}"
@@ -221,3 +522,54 @@ class PortManager:
221
522
  return instance_info
222
523
 
223
524
  return None
525
+
526
+ def get_port_status(self, port: int) -> Dict[str, any]:
527
+ """Get detailed status of a port including what's using it.
528
+
529
+ WHY: Provides comprehensive information for users to understand
530
+ port conflicts and make informed decisions.
531
+
532
+ Returns:
533
+ Dictionary with port status details
534
+ """
535
+ status = {
536
+ "port": port,
537
+ "available": self.is_port_available(port),
538
+ "process": None,
539
+ "instance": None,
540
+ "recommendation": None
541
+ }
542
+
543
+ # Check for process using the port
544
+ process_info = self.get_process_on_port(port)
545
+ if process_info:
546
+ status["process"] = {
547
+ "pid": process_info.pid,
548
+ "name": process_info.name,
549
+ "is_ours": process_info.is_ours,
550
+ "is_debug": process_info.is_debug,
551
+ "is_daemon": process_info.is_daemon,
552
+ "cmdline": process_info.cmdline[:100] + "..." if len(process_info.cmdline) > 100 else process_info.cmdline
553
+ }
554
+
555
+ # Provide recommendation based on process type
556
+ if process_info.is_ours:
557
+ if process_info.is_debug:
558
+ status["recommendation"] = "Can be automatically reclaimed (debug process)"
559
+ elif process_info.is_daemon:
560
+ status["recommendation"] = "Stop daemon with 'claude-mpm monitor stop' or use --force"
561
+ else:
562
+ status["recommendation"] = "Our process, consider stopping it manually"
563
+ else:
564
+ status["recommendation"] = "External process, choose a different port"
565
+
566
+ # Check for registered instance
567
+ instance_info = self.get_instance_by_port(port)
568
+ if instance_info:
569
+ status["instance"] = {
570
+ "id": instance_info.get("instance_id"),
571
+ "pid": instance_info.get("pid"),
572
+ "start_time": instance_info.get("start_time")
573
+ }
574
+
575
+ return status
@@ -88,7 +88,7 @@ class ConnectionEventHandler(BaseEventHandler):
88
88
  )
89
89
 
90
90
  @self.sio.event
91
- async def get_status(sid, data=None):
91
+ async def get_status(sid):
92
92
  """Handle status request.
93
93
 
94
94
  WHY: Clients need to query current server status on demand
@@ -151,27 +151,45 @@ class ConnectionEventHandler(BaseEventHandler):
151
151
  self.logger.info(f"🔵 Received claude_event from {sid}: {data}")
152
152
 
153
153
  # Check if this is a hook event and route to HookEventHandler
154
- if isinstance(data, dict) and data.get("type") == "hook":
155
- # Get the hook handler if available
156
- hook_handler = None
157
- # Check if event_registry exists and has handlers
158
- if hasattr(self.server, 'event_registry') and self.server.event_registry and hasattr(self.server.event_registry, 'handlers'):
159
- for handler in self.server.event_registry.handlers:
160
- if handler.__class__.__name__ == "HookEventHandler":
161
- hook_handler = handler
162
- break
163
-
164
- if hook_handler and hasattr(hook_handler, "process_hook_event"):
165
- # Let the hook handler process this event
166
- await hook_handler.process_hook_event(data)
167
- # Don't double-store or double-broadcast, return early
168
- return
154
+ # Hook events have types like "hook.user_prompt", "hook.pre_tool", etc.
155
+ if isinstance(data, dict):
156
+ event_type = data.get("type", "")
157
+ if isinstance(event_type, str) and event_type.startswith("hook."):
158
+ # Get the hook handler if available
159
+ hook_handler = None
160
+ # Check if event_registry exists and has handlers
161
+ if hasattr(self.server, 'event_registry') and self.server.event_registry:
162
+ if hasattr(self.server.event_registry, 'handlers'):
163
+ for handler in self.server.event_registry.handlers:
164
+ if handler.__class__.__name__ == "HookEventHandler":
165
+ hook_handler = handler
166
+ break
167
+
168
+ if hook_handler and hasattr(hook_handler, "process_hook_event"):
169
+ # Let the hook handler process this event
170
+ await hook_handler.process_hook_event(data)
171
+ # Don't double-store or double-broadcast, return early
172
+ return
169
173
 
170
174
  # Normalize event format before storing in history
171
175
  normalized_event = self._normalize_event(data)
172
176
 
173
- # Store in history
174
- self.event_history.append(normalized_event)
177
+ # Store in history - flatten if it's a nested structure
178
+ # If the normalized event has data.event, promote it to top level
179
+ if isinstance(normalized_event, dict) and 'data' in normalized_event:
180
+ if isinstance(normalized_event['data'], dict) and 'event' in normalized_event['data']:
181
+ # This is a nested event, flatten it
182
+ flattened = {
183
+ 'type': normalized_event.get('type', 'unknown'),
184
+ 'event': normalized_event['data'].get('event'),
185
+ 'timestamp': normalized_event.get('timestamp') or normalized_event['data'].get('timestamp'),
186
+ 'data': normalized_event['data'].get('data', {})
187
+ }
188
+ self.event_history.append(flattened)
189
+ else:
190
+ self.event_history.append(normalized_event)
191
+ else:
192
+ self.event_history.append(normalized_event)
175
193
  self.logger.info(
176
194
  f"📚 Event from client stored in history (total: {len(self.event_history)})"
177
195
  )
@@ -186,7 +204,8 @@ class ConnectionEventHandler(BaseEventHandler):
186
204
 
187
205
  WHY: Different clients may send events in different formats.
188
206
  This ensures all events have a consistent 'type' field for
189
- proper display in the dashboard.
207
+ proper display in the dashboard, while preserving the original
208
+ 'event' field for hook events.
190
209
  """
191
210
  if not isinstance(event_data, dict):
192
211
  return event_data
@@ -210,6 +229,9 @@ class ConnectionEventHandler(BaseEventHandler):
210
229
  else:
211
230
  # Default to system type for unknown events
212
231
  normalized['type'] = 'system'
232
+
233
+ # Note: We keep the 'event' field for backward compatibility
234
+ # Dashboard may use it for display purposes
213
235
 
214
236
  # Ensure there's always a type field
215
237
  if 'type' not in normalized:
@@ -32,26 +32,41 @@ class HookEventHandler(BaseEventHandler):
32
32
  """Process a hook event received from ConnectionEventHandler.
33
33
 
34
34
  WHY: This method is called by ConnectionEventHandler when it receives
35
- a claude_event with type 'hook'. This separation avoids handler conflicts.
35
+ a claude_event with type starting with 'hook.'. This separation avoids handler conflicts.
36
36
 
37
37
  Args:
38
- data: The complete event data including type, event, and data fields
38
+ data: The complete event data including type, timestamp, and data fields
39
39
  """
40
40
  if not isinstance(data, dict):
41
41
  return
42
42
 
43
43
  # Extract hook event details
44
- hook_event = data.get("event")
44
+ # Hook events come as: { type: "hook.user_prompt", timestamp: "...", data: {...} }
45
+ event_type = data.get("type", "")
46
+
47
+ # Extract the actual hook event name from the type (e.g., "hook.user_prompt" -> "user_prompt")
48
+ if event_type.startswith("hook."):
49
+ hook_event = event_type[5:] # Remove "hook." prefix
50
+ else:
51
+ hook_event = data.get("event", "") # Fallback for legacy format
52
+
45
53
  hook_data = data.get("data", {})
46
54
 
47
- # Add the event to history for replay
48
- self.add_to_history("hook", {
55
+ # Create properly formatted event for history
56
+ # Note: add_to_history expects the event data directly, not wrapped
57
+ history_event = {
58
+ "type": "hook",
49
59
  "event": hook_event,
50
60
  "data": hook_data,
51
- "timestamp": datetime.now().isoformat()
52
- })
61
+ "timestamp": data.get("timestamp") or datetime.now().isoformat()
62
+ }
63
+
64
+ # Add the event to history for replay
65
+ # The base handler's add_to_history will wrap it properly
66
+ self.event_history.append(history_event)
53
67
 
54
- # Broadcast the event to all connected clients
68
+ # Broadcast the original event to all connected clients
69
+ # (preserves all original fields)
55
70
  await self.broadcast_event("claude_event", data)
56
71
 
57
72
  # Track sessions based on hook events