claude-mpm 4.1.5__py3-none-any.whl → 4.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/templates/research.json +39 -13
- claude_mpm/cli/__init__.py +2 -0
- claude_mpm/cli/commands/__init__.py +2 -0
- claude_mpm/cli/commands/configure.py +1221 -0
- claude_mpm/cli/commands/configure_tui.py +1921 -0
- claude_mpm/cli/parsers/base_parser.py +7 -0
- claude_mpm/cli/parsers/configure_parser.py +119 -0
- claude_mpm/cli/startup_logging.py +39 -12
- claude_mpm/constants.py +1 -0
- claude_mpm/core/socketio_pool.py +35 -3
- claude_mpm/dashboard/static/css/connection-status.css +370 -0
- claude_mpm/dashboard/static/js/components/connection-debug.js +654 -0
- claude_mpm/dashboard/static/js/connection-manager.js +536 -0
- claude_mpm/dashboard/templates/index.html +11 -0
- claude_mpm/hooks/claude_hooks/services/__init__.py +3 -1
- claude_mpm/hooks/claude_hooks/services/connection_manager_http.py +190 -0
- claude_mpm/services/diagnostics/checks/__init__.py +2 -0
- claude_mpm/services/diagnostics/checks/instructions_check.py +418 -0
- claude_mpm/services/diagnostics/diagnostic_runner.py +15 -2
- claude_mpm/services/event_bus/direct_relay.py +173 -0
- claude_mpm/services/socketio/handlers/connection_handler.py +345 -0
- claude_mpm/services/socketio/server/broadcaster.py +32 -1
- claude_mpm/services/socketio/server/connection_manager.py +516 -0
- claude_mpm/services/socketio/server/core.py +63 -0
- claude_mpm/services/socketio/server/eventbus_integration.py +20 -9
- claude_mpm/services/socketio/server/main.py +27 -1
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.6.dist-info}/METADATA +3 -1
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.6.dist-info}/RECORD +33 -36
- claude_mpm/agents/OUTPUT_STYLE.md +0 -73
- claude_mpm/agents/backups/INSTRUCTIONS.md +0 -352
- claude_mpm/agents/templates/OPTIMIZATION_REPORT.md +0 -156
- claude_mpm/agents/templates/backup/data_engineer_agent_20250726_234551.json +0 -79
- claude_mpm/agents/templates/backup/documentation_agent_20250726_234551.json +0 -68
- claude_mpm/agents/templates/backup/engineer_agent_20250726_234551.json +0 -77
- claude_mpm/agents/templates/backup/ops_agent_20250726_234551.json +0 -78
- claude_mpm/agents/templates/backup/qa_agent_20250726_234551.json +0 -67
- claude_mpm/agents/templates/backup/research_agent_2025011_234551.json +0 -88
- claude_mpm/agents/templates/backup/research_agent_20250726_234551.json +0 -72
- claude_mpm/agents/templates/backup/research_memory_efficient.json +0 -88
- claude_mpm/agents/templates/backup/security_agent_20250726_234551.json +0 -78
- claude_mpm/agents/templates/backup/version_control_agent_20250726_234551.json +0 -62
- claude_mpm/agents/templates/vercel_ops_instructions.md +0 -582
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.6.dist-info}/WHEEL +0 -0
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.6.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.6.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.1.5.dist-info → claude_mpm-4.1.6.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
"""Direct EventBus to Socket.IO relay that uses server broadcaster.
|
|
2
|
+
|
|
3
|
+
This module provides a relay that connects EventBus directly to the
|
|
4
|
+
Socket.IO server's broadcaster, avoiding the client loopback issue.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from .event_bus import EventBus
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class DirectSocketIORelay:
    """Relay EventBus events directly to the Socket.IO broadcaster.

    WHY: The original SocketIORelay creates a client connection back to the server,
    which causes events to not reach the dashboard properly. This direct relay
    uses the server's broadcaster directly for proper event emission.
    """

    def __init__(self, server_instance):
        """Initialize the direct relay.

        Args:
            server_instance: The SocketIOServer instance with broadcaster
        """
        self.server = server_instance
        self.event_bus = EventBus.get_instance()
        self.enabled = True
        self.connected = False  # Track connection state
        # Relay counters exposed via get_stats() for diagnostics.
        self.stats = {
            "events_relayed": 0,
            "events_failed": 0,
            "last_relay_time": None,
        }
        # Cache the debug-level check once so the hot relay path avoids
        # repeated logger lookups per event.
        self.debug = logger.isEnabledFor(logging.DEBUG)

    def start(self) -> None:
        """Start the relay by subscribing to EventBus events."""
        if not self.enabled:
            logger.warning("DirectSocketIORelay is disabled")
            return

        # Create handler for wildcard events
        def handle_wildcard_hook_event(event_type: str, data: Any):
            """Handle wildcard hook events from the event bus.

            Wildcard handlers receive both event_type and data.
            This is the primary handler that knows the correct event type.
            """
            self._handle_hook_event(event_type, data)

        # Subscribe to all hook events via wildcard.
        # This single subscription handles all hook.* events efficiently.
        self.event_bus.on("hook.*", handle_wildcard_hook_event)

        # Add debug logging for verification
        logger.info("[DirectRelay] Subscribed to hook.* events on EventBus")
        logger.info(
            f"[DirectRelay] Server broadcaster available: {self.server and self.server.broadcaster is not None}"
        )
        logger.info(f"[DirectRelay] EventBus instance: {self.event_bus is not None}")

        # Mark as connected after successful subscription
        self.connected = True
        logger.info("[DirectRelay] Started and subscribed to hook events")

    def _handle_hook_event(self, event_type: str, data: Any):
        """Internal method to handle hook events and broadcast them.

        Args:
            event_type: The event type (e.g., "hook.pre_tool")
            data: The event data
        """
        # BUGFIX: stop() cannot unsubscribe from the EventBus (no off() method),
        # so it relies on this flag to halt processing. Previously the flag was
        # never checked here, so events kept relaying after stop() was called.
        if not self.enabled:
            return

        try:
            # Log the event reception
            if self.debug:
                logger.debug(f"[DirectRelay] Received event: {event_type}")

            # Only relay hook events
            if event_type.startswith("hook."):
                # Extract the event subtype (e.g., "hook.pre_tool" -> "pre_tool")
                event_subtype = (
                    event_type.split(".", 1)[1] if "." in event_type else event_type
                )

                # Always log important hook events for debugging
                if event_subtype in [
                    "pre_tool",
                    "post_tool",
                    "user_prompt",
                    "subagent_stop",
                ]:
                    logger.info(f"[DirectRelay] Processing {event_type} event")

                # Use the server's broadcaster directly
                if self.server and self.server.broadcaster:
                    # Log debug info about the broadcaster state
                    if self.debug:
                        has_sio = (
                            hasattr(self.server.broadcaster, "sio")
                            and self.server.broadcaster.sio is not None
                        )
                        has_loop = (
                            hasattr(self.server.broadcaster, "loop")
                            and self.server.broadcaster.loop is not None
                        )
                        logger.debug(
                            f"[DirectRelay] Broadcaster state - has_sio: {has_sio}, has_loop: {has_loop}"
                        )
                        logger.debug(
                            f"[DirectRelay] Event subtype: {event_subtype}, data keys: {list(data.keys()) if isinstance(data, dict) else 'not-dict'}"
                        )

                    # The broadcaster's broadcast_event expects an event name and
                    # a data dict. We pass the full dotted event_type
                    # (e.g., "hook.pre_tool") so the EventNormalizer can extract
                    # type="hook" and subtype="pre_tool" correctly.
                    broadcast_data = (
                        data if isinstance(data, dict) else {"data": data}
                    )

                    self.server.broadcaster.broadcast_event(event_type, broadcast_data)

                    self.stats["events_relayed"] += 1
                    self.stats["last_relay_time"] = datetime.now().isoformat()

                    if self.debug:
                        logger.debug(
                            f"[DirectRelay] Broadcasted hook event: {event_type}"
                        )
                else:
                    logger.warning(
                        f"[DirectRelay] Server broadcaster not available for {event_type}"
                    )
                    self.stats["events_failed"] += 1

        except Exception as e:
            self.stats["events_failed"] += 1
            logger.error(f"[DirectRelay] Failed to relay event {event_type}: {e}")

    def stop(self) -> None:
        """Stop the relay."""
        self.enabled = False
        self.connected = False
        # EventBus doesn't provide an off() method, so listeners remain
        # subscribed; _handle_hook_event checks the enabled flag and
        # returns early, which prevents further processing.
        logger.info("[DirectRelay] Stopped")

    def get_stats(self) -> dict:
        """Get relay statistics.

        Returns:
            dict with relay state flags and event counters. Boolean keys are
            always real bools (previously has_broadcaster could be None when
            no server was set, due to `and` short-circuiting).
        """
        return {
            "enabled": self.enabled,
            "connected": self.connected,
            "events_relayed": self.stats["events_relayed"],
            "events_failed": self.stats["events_failed"],
            "last_relay_time": self.stats["last_relay_time"],
            "has_server": self.server is not None,
            "has_broadcaster": bool(
                self.server is not None and self.server.broadcaster is not None
            ),
        }
|
|
@@ -0,0 +1,345 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Enhanced Connection Event Handler for Socket.IO.
|
|
3
|
+
|
|
4
|
+
WHY: This module provides robust connection handling with state tracking,
|
|
5
|
+
event replay on reconnection, and health monitoring integration.
|
|
6
|
+
|
|
7
|
+
DESIGN DECISION: Centralized connection event handling ensures consistent
|
|
8
|
+
state management and provides resilient event delivery across reconnections.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from datetime import datetime
|
|
12
|
+
|
|
13
|
+
from .base import BaseEventHandler
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class EnhancedConnectionEventHandler(BaseEventHandler):
    """
    Handles Socket.IO connection events with enhanced robustness.

    Features:
    - Persistent client tracking across reconnections
    - Event replay on reconnection
    - Connection health monitoring
    - Acknowledgment system for guaranteed delivery
    """

    def register_events(self):
        """Register enhanced connection event handlers.

        Registers connect/disconnect plus auxiliary events (ping, heartbeat,
        acknowledgment, replay, stats) on the server's Socket.IO instance.
        All handlers degrade gracefully when no connection_manager is set.
        """
        sio = self.server.core.sio

        @sio.event
        async def connect(sid, environ, auth):
            """Handle client connection with enhanced tracking."""
            try:
                # Extract client ID from auth or create new
                client_id = None
                if auth and isinstance(auth, dict):
                    client_id = auth.get("client_id")

                # Register connection with manager
                if self.server.connection_manager:
                    conn = await self.server.connection_manager.register_connection(
                        sid, client_id
                    )

                    # Add to server's connected clients
                    self.server.connected_clients.add(sid)
                    self.server.stats["connections_total"] += 1

                    # Store client info
                    self.server.client_info[sid] = {
                        "client_id": conn.client_id,
                        "connected_at": datetime.now().isoformat(),
                        "user_agent": environ.get("HTTP_USER_AGENT", "unknown"),
                        "remote_addr": environ.get("REMOTE_ADDR", "unknown"),
                    }

                    # Send client ID back for future reconnections
                    await sio.emit(
                        "connection_established",
                        {
                            "client_id": conn.client_id,
                            "sid": sid,
                            "timestamp": datetime.now().isoformat(),
                            "server_version": self.get_server_version(),
                        },
                        room=sid,
                    )

                    # Send current server status
                    await self._send_server_status(sid)

                    # Check for events to replay
                    last_sequence = 0
                    if auth and isinstance(auth, dict):
                        last_sequence = auth.get("last_sequence", 0)

                    if last_sequence > 0:
                        replay_events = (
                            await self.server.connection_manager.get_replay_events(
                                sid, last_sequence
                            )
                        )

                        if replay_events:
                            self.logger.info(
                                f"Replaying {len(replay_events)} events for client {conn.client_id}"
                            )

                            # Send replay events in batch
                            await sio.emit(
                                "event_replay",
                                {
                                    "events": replay_events,
                                    "count": len(replay_events),
                                    "from_sequence": last_sequence,
                                },
                                room=sid,
                            )

                    # Send event history for initial population
                    if (
                        hasattr(self.server, "event_history")
                        and self.server.event_history
                    ):
                        history_data = list(self.server.event_history)
                        await sio.emit(
                            "event_history",
                            {"events": history_data, "count": len(history_data)},
                            room=sid,
                        )

                    self.logger.info(
                        f"Client connected: {conn.client_id} (sid: {sid}, "
                        f"reconnect: {conn.metrics.reconnect_count > 0})"
                    )
                else:
                    # Fallback to basic connection tracking
                    self.server.connected_clients.add(sid)
                    self.server.stats["connections_total"] += 1

                    self.server.client_info[sid] = {
                        "connected_at": datetime.now().isoformat(),
                        "user_agent": environ.get("HTTP_USER_AGENT", "unknown"),
                        "remote_addr": environ.get("REMOTE_ADDR", "unknown"),
                    }

                    await self._send_server_status(sid)

                    if (
                        hasattr(self.server, "event_history")
                        and self.server.event_history
                    ):
                        history_data = list(self.server.event_history)
                        await sio.emit(
                            "event_history",
                            {"events": history_data, "count": len(history_data)},
                            room=sid,
                        )

                    self.logger.info(f"Client connected: {sid}")

            except Exception as e:
                self.logger.error(f"Error handling connection for {sid}: {e}")

        @sio.event
        async def disconnect(sid):
            """Handle client disconnection with state preservation."""
            try:
                # Get disconnection reason if available
                reason = "client_disconnect"

                # Unregister from connection manager but preserve state
                if self.server.connection_manager:
                    await self.server.connection_manager.unregister_connection(
                        sid, reason
                    )

                # Remove from connected clients
                if sid in self.server.connected_clients:
                    self.server.connected_clients.remove(sid)

                # Remove client info
                if sid in self.server.client_info:
                    client_info = self.server.client_info[sid]
                    del self.server.client_info[sid]

                    client_id = client_info.get("client_id", sid)
                    self.logger.info(f"Client disconnected: {client_id} (sid: {sid})")
                else:
                    self.logger.info(f"Client disconnected: {sid}")

            except Exception as e:
                self.logger.error(f"Error handling disconnection for {sid}: {e}")

        @sio.event
        async def ping(sid):
            """Handle ping from client for health monitoring."""
            try:
                # Update activity in connection manager
                if self.server.connection_manager:
                    await self.server.connection_manager.update_activity(sid, "ping")

                # Send pong response with timestamp
                await sio.emit(
                    "pong",
                    {
                        "timestamp": datetime.now().isoformat(),
                        "server_time": datetime.now().timestamp(),
                    },
                    room=sid,
                )

            except Exception as e:
                self.logger.error(f"Error handling ping from {sid}: {e}")

        @sio.event
        async def acknowledge_event(sid, data):
            """Handle event acknowledgment from client."""
            try:
                if not isinstance(data, dict):
                    return

                sequence = data.get("sequence")
                if sequence and self.server.connection_manager:
                    await self.server.connection_manager.acknowledge_event(
                        sid, sequence
                    )

                    # Optional: Send confirmation
                    await sio.emit(
                        "ack_confirmed",
                        {"sequence": sequence, "timestamp": datetime.now().isoformat()},
                        room=sid,
                    )

            except Exception as e:
                self.logger.error(f"Error handling acknowledgment from {sid}: {e}")

        @sio.event
        async def request_replay(sid, data):
            """Handle replay request from client after reconnection."""
            try:
                if not isinstance(data, dict):
                    return

                last_sequence = data.get("last_sequence", 0)

                if self.server.connection_manager:
                    replay_events = (
                        await self.server.connection_manager.get_replay_events(
                            sid, last_sequence
                        )
                    )

                    if replay_events:
                        await sio.emit(
                            "event_replay",
                            {
                                "events": replay_events,
                                "count": len(replay_events),
                                "from_sequence": last_sequence,
                            },
                            room=sid,
                        )
                    else:
                        # Explicit empty reply so the client can stop waiting
                        await sio.emit(
                            "event_replay",
                            {
                                "events": [],
                                "count": 0,
                                "from_sequence": last_sequence,
                                "message": "No events to replay",
                            },
                            room=sid,
                        )

            except Exception as e:
                self.logger.error(f"Error handling replay request from {sid}: {e}")

        @sio.event
        async def get_connection_stats(sid):
            """Get connection statistics for debugging."""
            try:
                stats = {
                    "timestamp": datetime.now().isoformat(),
                    "total_connections": len(self.server.connected_clients),
                    "server_stats": self.server.stats,
                }

                if self.server.connection_manager:
                    conn = self.server.connection_manager.get_connection(sid)
                    if conn:
                        stats["connection"] = {
                            "client_id": conn.client_id,
                            "state": conn.state.value,
                            "connected_at": conn.connected_at,
                            "quality": conn.calculate_quality(),
                            "metrics": {
                                "events_sent": conn.metrics.events_sent,
                                "events_acked": conn.metrics.events_acked,
                                "events_buffered": conn.metrics.events_buffered,
                                "reconnect_count": conn.metrics.reconnect_count,
                            },
                        }

                    stats["manager_metrics"] = (
                        self.server.connection_manager.get_metrics()
                    )

                await sio.emit("connection_stats", stats, room=sid)

            except Exception as e:
                self.logger.error(f"Error getting connection stats for {sid}: {e}")

        @sio.event
        async def heartbeat(sid):
            """Handle client heartbeat for connection monitoring."""
            try:
                # Update activity
                if self.server.connection_manager:
                    await self.server.connection_manager.update_activity(sid, "event")

                # Send heartbeat response
                await sio.emit(
                    "heartbeat_response",
                    {"timestamp": datetime.now().isoformat(), "status": "alive"},
                    room=sid,
                )

            except Exception as e:
                self.logger.error(f"Error handling heartbeat from {sid}: {e}")

        self.logger.info("Enhanced connection event handlers registered")

    async def _send_server_status(self, sid: str):
        """Send current server status to a client.

        Args:
            sid: The Socket.IO session id to address the status to.
        """
        try:
            status_data = {
                "server_running": self.server.running,
                "claude_status": self.server.claude_status,
                "claude_pid": self.server.claude_pid,
                "session_id": self.server.session_id,
                "connected_clients": len(self.server.connected_clients),
                "server_start_time": (
                    self.server.stats.get("start_time").isoformat()
                    if self.server.stats.get("start_time")
                    else None
                ),
                "timestamp": datetime.now().isoformat(),
            }

            await self.server.core.sio.emit("server_status", status_data, room=sid)

        except Exception as e:
            self.logger.error(f"Error sending server status to {sid}: {e}")

    def get_server_version(self) -> str:
        """Get server version for client info.

        Returns:
            The version string, or "unknown" if the version service fails.
        """
        try:
            from claude_mpm.services.version_service import VersionService

            return VersionService().get_version()
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Catch only ordinary exceptions.
        except Exception:
            return "unknown"
|
|
@@ -153,6 +153,7 @@ class SocketIOEventBroadcaster:
|
|
|
153
153
|
stats: Dict[str, Any],
|
|
154
154
|
logger,
|
|
155
155
|
server=None, # Add server reference for event history access
|
|
156
|
+
connection_manager=None, # Add connection manager for robust delivery
|
|
156
157
|
):
|
|
157
158
|
self.sio = sio
|
|
158
159
|
self.connected_clients = connected_clients
|
|
@@ -162,6 +163,7 @@ class SocketIOEventBroadcaster:
|
|
|
162
163
|
self.logger = logger
|
|
163
164
|
self.loop = None # Will be set by main server
|
|
164
165
|
self.server = server # Reference to main server for event history
|
|
166
|
+
self.connection_manager = connection_manager # For connection tracking
|
|
165
167
|
|
|
166
168
|
# Initialize retry queue for resilient delivery
|
|
167
169
|
self.retry_queue = RetryQueue(max_size=1000)
|
|
@@ -313,7 +315,7 @@ class SocketIOEventBroadcaster:
|
|
|
313
315
|
|
|
314
316
|
WHY: Enhanced with retry queue to ensure reliable delivery
|
|
315
317
|
even during transient network issues. Now uses EventNormalizer
|
|
316
|
-
to ensure consistent event schema.
|
|
318
|
+
to ensure consistent event schema and ConnectionManager for tracking.
|
|
317
319
|
"""
|
|
318
320
|
if not self.sio:
|
|
319
321
|
return
|
|
@@ -342,6 +344,18 @@ class SocketIOEventBroadcaster:
|
|
|
342
344
|
f"Added {event['type']}/{event['subtype']} to history (total: {len(self.server.event_history)})"
|
|
343
345
|
)
|
|
344
346
|
|
|
347
|
+
# If we have a connection manager, buffer event for all connected clients
|
|
348
|
+
if self.connection_manager and self.loop:
|
|
349
|
+
# Buffer for each connected client asynchronously
|
|
350
|
+
async def buffer_for_clients():
|
|
351
|
+
for sid in list(self.connected_clients):
|
|
352
|
+
await self.connection_manager.buffer_event(sid, event)
|
|
353
|
+
|
|
354
|
+
try:
|
|
355
|
+
asyncio.run_coroutine_threadsafe(buffer_for_clients(), self.loop)
|
|
356
|
+
except Exception as e:
|
|
357
|
+
self.logger.warning(f"Failed to buffer event for clients: {e}")
|
|
358
|
+
|
|
345
359
|
# Broadcast to all connected clients
|
|
346
360
|
broadcast_success = False
|
|
347
361
|
try:
|
|
@@ -360,6 +374,23 @@ class SocketIOEventBroadcaster:
|
|
|
360
374
|
future.result(timeout=0.5) # 500ms timeout
|
|
361
375
|
broadcast_success = True
|
|
362
376
|
self.stats["events_sent"] += 1
|
|
377
|
+
|
|
378
|
+
# Update activity for all connected clients
|
|
379
|
+
if self.connection_manager:
|
|
380
|
+
|
|
381
|
+
async def update_activities():
|
|
382
|
+
for sid in list(self.connected_clients):
|
|
383
|
+
await self.connection_manager.update_activity(
|
|
384
|
+
sid, "event"
|
|
385
|
+
)
|
|
386
|
+
|
|
387
|
+
try:
|
|
388
|
+
asyncio.run_coroutine_threadsafe(
|
|
389
|
+
update_activities(), self.loop
|
|
390
|
+
)
|
|
391
|
+
except:
|
|
392
|
+
pass # Non-critical
|
|
393
|
+
|
|
363
394
|
self.logger.debug(f"Broadcasted event: {event_type}")
|
|
364
395
|
except:
|
|
365
396
|
# Will be added to retry queue below
|