empathy-framework 4.9.0-py3-none-any.whl → 5.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/METADATA +64 -25
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/RECORD +47 -26
- empathy_os/__init__.py +2 -2
- empathy_os/cache/hash_only.py +6 -3
- empathy_os/cache/hybrid.py +6 -3
- empathy_os/cli_legacy.py +27 -1
- empathy_os/cli_minimal.py +512 -15
- empathy_os/cli_router.py +145 -113
- empathy_os/cli_unified.py +25 -0
- empathy_os/dashboard/__init__.py +42 -0
- empathy_os/dashboard/app.py +512 -0
- empathy_os/dashboard/simple_server.py +403 -0
- empathy_os/dashboard/standalone_server.py +536 -0
- empathy_os/memory/__init__.py +19 -5
- empathy_os/memory/short_term.py +4 -70
- empathy_os/memory/types.py +2 -2
- empathy_os/models/__init__.py +3 -0
- empathy_os/models/adaptive_routing.py +437 -0
- empathy_os/models/registry.py +4 -4
- empathy_os/socratic/ab_testing.py +1 -1
- empathy_os/telemetry/__init__.py +29 -1
- empathy_os/telemetry/agent_coordination.py +478 -0
- empathy_os/telemetry/agent_tracking.py +350 -0
- empathy_os/telemetry/approval_gates.py +563 -0
- empathy_os/telemetry/event_streaming.py +405 -0
- empathy_os/telemetry/feedback_loop.py +557 -0
- empathy_os/vscode_bridge 2.py +173 -0
- empathy_os/workflows/__init__.py +4 -4
- empathy_os/workflows/base.py +495 -43
- empathy_os/workflows/history.py +3 -5
- empathy_os/workflows/output.py +410 -0
- empathy_os/workflows/progress.py +324 -22
- empathy_os/workflows/progressive/README 2.md +454 -0
- empathy_os/workflows/progressive/__init__ 2.py +92 -0
- empathy_os/workflows/progressive/cli 2.py +242 -0
- empathy_os/workflows/progressive/core 2.py +488 -0
- empathy_os/workflows/progressive/orchestrator 2.py +701 -0
- empathy_os/workflows/progressive/reports 2.py +528 -0
- empathy_os/workflows/progressive/telemetry 2.py +280 -0
- empathy_os/workflows/progressive/test_gen 2.py +514 -0
- empathy_os/workflows/progressive/workflow 2.py +628 -0
- empathy_os/workflows/routing.py +5 -0
- empathy_os/workflows/security_audit.py +189 -0
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/top_level.txt +0 -0
empathy_os/dashboard/standalone_server.py
ADDED

@@ -0,0 +1,536 @@
+"""Standalone Dashboard Server - Reads Directly from Redis.
+
+This version bypasses the telemetry API layer and reads directly from Redis.
+Works with data populated by scripts/populate_redis_direct.py.
+
+Zero external dependencies (uses Python stdlib only).
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+from datetime import datetime
+from http.server import BaseHTTPRequestHandler, HTTPServer
+from pathlib import Path
+from urllib.parse import parse_qs, urlparse
+
+try:
+    import redis
+
+    REDIS_AVAILABLE = True
+except ImportError:
+    REDIS_AVAILABLE = False
+
+logger = logging.getLogger(__name__)
+
+
+class StandaloneDashboardHandler(BaseHTTPRequestHandler):
+    """HTTP handler that reads directly from Redis."""
+
+    # Class variable for Redis connection (shared across requests)
+    _redis_client = None
+
+    @classmethod
+    def get_redis(cls):
+        """Get or create Redis connection."""
+        if not REDIS_AVAILABLE:
+            return None
+
+        if cls._redis_client is None:
+            try:
+                cls._redis_client = redis.Redis(host="localhost", port=6379, decode_responses=False)
+                cls._redis_client.ping()  # Test connection
+            except Exception as e:
+                logger.error(f"Failed to connect to Redis: {e}")
+                cls._redis_client = None
+
+        return cls._redis_client
+
+    def do_GET(self):
+        """Handle GET requests."""
+        parsed = urlparse(self.path)
+        path = parsed.path
+        query = parse_qs(parsed.query)
+
+        # Route requests
+        if path == "/" or path == "/index.html":
+            self.serve_file("index.html", "text/html")
+        elif path == "/static/style.css":
+            self.serve_file("style.css", "text/css")
+        elif path == "/static/app.js":
+            self.serve_file("app.js", "application/javascript")
+        elif path == "/api/health":
+            self.api_health()
+        elif path == "/api/agents":
+            self.api_agents()
+        elif path.startswith("/api/agents/"):
+            agent_id = path.split("/")[-1]
+            self.api_agent_detail(agent_id)
+        elif path == "/api/signals":
+            limit = int(query.get("limit", [50])[0])
+            self.api_signals(limit)
+        elif path == "/api/events":
+            event_type = query.get("event_type", [None])[0]
+            limit = int(query.get("limit", [100])[0])
+            self.api_events(event_type, limit)
+        elif path == "/api/approvals":
+            self.api_approvals()
+        elif path == "/api/feedback/workflows":
+            self.api_feedback_workflows()
+        elif path == "/api/feedback/underperforming":
+            threshold = float(query.get("threshold", [0.7])[0])
+            self.api_underperforming(threshold)
+        else:
+            self.send_error(404, "Not Found")
+
+    def do_POST(self):
+        """Handle POST requests."""
+        parsed = urlparse(self.path)
+        path = parsed.path
+
+        # Get request body
+        content_length = int(self.headers.get("Content-Length", 0))
+        body = self.rfile.read(content_length) if content_length > 0 else b"{}"
+        data = json.loads(body.decode("utf-8")) if body else {}
+
+        # Route requests
+        if "/approve" in path:
+            request_id = path.split("/")[-2]
+            self.api_approve(request_id, data.get("reason", "Approved via dashboard"))
+        elif "/reject" in path:
+            request_id = path.split("/")[-2]
+            self.api_reject(request_id, data.get("reason", "Rejected via dashboard"))
+        else:
+            self.send_error(404, "Not Found")
+
+    def serve_file(self, filename: str, content_type: str):
+        """Serve static file."""
+        try:
+            static_dir = Path(__file__).parent / "static"
+            file_path = static_dir / filename
+
+            if not file_path.exists():
+                self.send_error(404, f"File not found: {filename}")
+                return
+
+            content = file_path.read_bytes()
+
+            self.send_response(200)
+            self.send_header("Content-Type", content_type)
+            self.send_header("Content-Length", str(len(content)))
+            self.end_headers()
+            self.wfile.write(content)
+
+        except Exception as e:
+            logger.error(f"Failed to serve file {filename}: {e}")
+            self.send_error(500, str(e))
+
+    def send_json(self, data: dict | list, status: int = 200):
+        """Send JSON response."""
+        try:
+            content = json.dumps(data).encode("utf-8")
+
+            self.send_response(status)
+            self.send_header("Content-Type", "application/json")
+            self.send_header("Content-Length", str(len(content)))
+            self.send_header("Access-Control-Allow-Origin", "*")  # CORS
+            self.end_headers()
+            self.wfile.write(content)
+
+        except Exception as e:
+            logger.error(f"Failed to send JSON: {e}")
+            self.send_error(500, str(e))
+
+    # ========================================================================
+    # API Endpoints - Read Directly from Redis
+    # ========================================================================
+
+    def api_health(self):
+        """System health endpoint."""
+        try:
+            r = self.get_redis()
+            has_redis = r is not None
+
+            if has_redis:
+                # Count keys directly
+                heartbeat_count = len(r.keys(b"heartbeat:*"))
+                approval_count = len(r.keys(b"approval:pending:*"))
+            else:
+                heartbeat_count = 0
+                approval_count = 0
+
+            self.send_json(
+                {
+                    "status": "healthy" if has_redis else "degraded",
+                    "redis_available": has_redis,
+                    "active_agents": heartbeat_count,
+                    "pending_approvals": approval_count,
+                    "timestamp": datetime.utcnow().isoformat(),
+                }
+            )
+        except Exception as e:
+            self.send_json({"status": "error", "error": str(e)}, status=500)
+
+    def api_agents(self):
+        """List active agents."""
+        try:
+            r = self.get_redis()
+            if not r:
+                self.send_json([])
+                return
+
+            result = []
+            for key in r.keys(b"heartbeat:*"):
+                try:
+                    data = r.get(key)
+                    if data:
+                        heartbeat = json.loads(data.decode("utf-8"))
+                        result.append(
+                            {
+                                "agent_id": heartbeat.get("agent_id"),
+                                "status": heartbeat.get("status"),
+                                "last_seen": heartbeat.get("timestamp"),
+                                "progress": heartbeat.get("progress", 0.0),
+                                "current_task": heartbeat.get("current_task", "Unknown"),
+                            }
+                        )
+                except Exception as e:
+                    logger.error(f"Failed to parse heartbeat {key}: {e}")
+
+            self.send_json(result)
+        except Exception as e:
+            logger.error(f"Failed to get agents: {e}")
+            self.send_json([], status=500)
+
+    def api_agent_detail(self, agent_id: str):
+        """Get specific agent details."""
+        try:
+            r = self.get_redis()
+            if not r:
+                self.send_json({"error": "Redis not available"}, status=503)
+                return
+
+            key = f"heartbeat:{agent_id}".encode()
+            data = r.get(key)
+
+            if not data:
+                self.send_json({"error": f"Agent {agent_id} not found"}, status=404)
+                return
+
+            heartbeat = json.loads(data.decode("utf-8"))
+            self.send_json(
+                {
+                    "agent_id": heartbeat.get("agent_id"),
+                    "status": heartbeat.get("status"),
+                    "last_seen": heartbeat.get("timestamp"),
+                    "progress": heartbeat.get("progress", 0.0),
+                    "current_task": heartbeat.get("current_task"),
+                    "metadata": heartbeat.get("metadata", {}),
+                }
+            )
+        except Exception as e:
+            self.send_json({"error": str(e)}, status=500)
+
+    def api_signals(self, limit: int):
+        """Get recent coordination signals."""
+        try:
+            r = self.get_redis()
+            if not r:
+                self.send_json([])
+                return
+
+            result = []
+            for key in r.keys(b"empathy:signal:*")[:limit]:
+                try:
+                    data = r.get(key)
+                    if data:
+                        signal = json.loads(data.decode("utf-8"))
+                        result.append(
+                            {
+                                "signal_type": signal.get("signal_type"),
+                                "source_agent": signal.get("source_agent"),
+                                "target_agent": signal.get("target_agent"),
+                                "timestamp": signal.get("timestamp"),
+                                "payload": signal.get("payload", {}),
+                            }
+                        )
+                except Exception as e:
+                    logger.error(f"Failed to parse signal {key}: {e}")
+
+            # Sort by timestamp (newest first)
+            result.sort(key=lambda x: x.get("timestamp", ""), reverse=True)
+            self.send_json(result[:limit])
+        except Exception as e:
+            logger.error(f"Failed to get signals: {e}")
+            self.send_json([])
+
+    def api_events(self, event_type: str | None, limit: int):
+        """Get recent events."""
+        try:
+            r = self.get_redis()
+            if not r:
+                self.send_json([])
+                return
+
+            result = []
+
+            # Get from streams
+            stream_patterns = [
+                b"stream:workflow_progress",
+                b"stream:agent_heartbeat",
+                b"stream:coordination_signal",
+            ]
+
+            for stream_key in stream_patterns:
+                try:
+                    # Get last N entries from stream
+                    entries = r.xrevrange(stream_key, count=limit)
+                    for entry_id, fields in entries:
+                        if b"data" in fields:
+                            event = json.loads(fields[b"data"].decode("utf-8"))
+                            result.append(
+                                {
+                                    "event_id": event.get("event_id"),
+                                    "event_type": event.get("event_type"),
+                                    "timestamp": event.get("timestamp"),
+                                    "data": event.get("data", {}),
+                                    "source": event.get("source"),
+                                }
+                            )
+                except Exception as e:
+                    logger.debug(f"Stream {stream_key} not found or empty: {e}")
+
+            # Sort by timestamp
+            result.sort(key=lambda x: x.get("timestamp", ""), reverse=True)
+            self.send_json(result[:limit])
+        except Exception as e:
+            logger.error(f"Failed to get events: {e}")
+            self.send_json([])
+
+    def api_approvals(self):
+        """Get pending approvals."""
+        try:
+            r = self.get_redis()
+            if not r:
+                self.send_json([])
+                return
+
+            result = []
+            for key in r.keys(b"approval:pending:*"):
+                try:
+                    data = r.get(key)
+                    if data:
+                        approval = json.loads(data.decode("utf-8"))
+                        result.append(
+                            {
+                                "request_id": approval.get("request_id"),
+                                "approval_type": approval.get("approval_type"),
+                                "agent_id": approval.get("agent_id"),
+                                "context": approval.get("context", {}),
+                                "timestamp": approval.get("timestamp"),
+                                "timeout_seconds": approval.get("timeout_seconds", 300),
+                            }
+                        )
+                except Exception as e:
+                    logger.error(f"Failed to parse approval {key}: {e}")
+
+            self.send_json(result)
+        except Exception as e:
+            logger.error(f"Failed to get approvals: {e}")
+            self.send_json([])
+
+    def api_approve(self, request_id: str, reason: str):
+        """Approve request."""
+        try:
+            r = self.get_redis()
+            if not r:
+                self.send_json({"error": "Redis not available"}, status=503)
+                return
+
+            # Delete from pending
+            key = f"approval:pending:{request_id}".encode()
+            if r.delete(key):
+                self.send_json({"status": "approved", "request_id": request_id})
+            else:
+                self.send_json({"error": "Request not found"}, status=404)
+        except Exception as e:
+            self.send_json({"error": str(e)}, status=500)
+
+    def api_reject(self, request_id: str, reason: str):
+        """Reject request."""
+        try:
+            r = self.get_redis()
+            if not r:
+                self.send_json({"error": "Redis not available"}, status=503)
+                return
+
+            # Delete from pending
+            key = f"approval:pending:{request_id}".encode()
+            if r.delete(key):
+                self.send_json({"status": "rejected", "request_id": request_id})
+            else:
+                self.send_json({"error": "Request not found"}, status=404)
+        except Exception as e:
+            self.send_json({"error": str(e)}, status=500)
+
+    def api_feedback_workflows(self):
+        """Get workflow quality metrics."""
+        try:
+            r = self.get_redis()
+            if not r:
+                self.send_json([])
+                return
+
+            # Group feedback by workflow/stage/tier
+            feedback_groups = {}
+
+            for key in r.keys(b"feedback:*"):
+                try:
+                    data = r.get(key)
+                    if data:
+                        feedback = json.loads(data.decode("utf-8"))
+                        workflow = feedback.get("workflow_name")
+                        stage = feedback.get("stage_name")
+                        tier = feedback.get("tier")
+                        quality = feedback.get("quality_score")
+
+                        group_key = f"{workflow}/{stage}/{tier}"
+                        if group_key not in feedback_groups:
+                            feedback_groups[group_key] = {
+                                "workflow_name": workflow,
+                                "stage_name": stage,
+                                "tier": tier,
+                                "qualities": [],
+                            }
+                        feedback_groups[group_key]["qualities"].append(quality)
+                except Exception as e:
+                    logger.error(f"Failed to parse feedback {key}: {e}")
+
+            # Calculate stats
+            result = []
+            for group_key, group in feedback_groups.items():
+                qualities = group["qualities"]
+                if qualities:
+                    avg_quality = sum(qualities) / len(qualities)
+                    result.append(
+                        {
+                            "workflow_name": group["workflow_name"],
+                            "stage_name": group["stage_name"],
+                            "tier": group["tier"],
+                            "avg_quality": avg_quality,
+                            "sample_count": len(qualities),
+                            "trend": 0,  # Simplified - no trend calculation
+                        }
+                    )
+
+            self.send_json(result)
+        except Exception as e:
+            logger.error(f"Failed to get quality metrics: {e}")
+            self.send_json([])
+
+    def api_underperforming(self, threshold: float):
+        """Get underperforming stages."""
+        try:
+            r = self.get_redis()
+            if not r:
+                self.send_json([])
+                return
+
+            # Get all feedback and group by workflow/stage
+            feedback_groups = {}
+
+            for key in r.keys(b"feedback:*"):
+                try:
+                    data = r.get(key)
+                    if data:
+                        feedback = json.loads(data.decode("utf-8"))
+                        workflow = feedback.get("workflow_name")
+                        stage = feedback.get("stage_name")
+                        quality = feedback.get("quality_score")
+
+                        group_key = f"{workflow}/{stage}"
+                        if group_key not in feedback_groups:
+                            feedback_groups[group_key] = {
+                                "workflow_name": workflow,
+                                "stage_name": stage,
+                                "qualities": [],
+                            }
+                        feedback_groups[group_key]["qualities"].append(quality)
+                except Exception as e:
+                    logger.error(f"Failed to parse feedback {key}: {e}")
+
+            # Find underperforming stages
+            result = []
+            for group_key, group in feedback_groups.items():
+                qualities = group["qualities"]
+                if qualities:
+                    avg_quality = sum(qualities) / len(qualities)
+                    if avg_quality < threshold:
+                        result.append(
+                            {
+                                "workflow_name": group["workflow_name"],
+                                "stage_name": group["stage_name"],
+                                "avg_quality": avg_quality,
+                                "sample_count": len(qualities),
+                                "min_quality": min(qualities),
+                                "max_quality": max(qualities),
+                                "trend": 0,
+                            }
+                        )
+
+            # Sort by quality (worst first)
+            result.sort(key=lambda x: x["avg_quality"])
+            self.send_json(result)
+        except Exception as e:
+            logger.error(f"Failed to get underperforming: {e}")
+            self.send_json([])
+
+    def log_message(self, format, *args):
+        """Suppress default logging."""
+        # Override to reduce noise - only log errors
+        if args[1][0] in ("4", "5"):  # 4xx or 5xx errors
+            logger.warning(f"{self.address_string()} - {format % args}")
+
+
+def run_standalone_dashboard(host: str = "127.0.0.1", port: int = 8000):
+    """Run standalone dashboard that reads directly from Redis.
+
+    This version bypasses the telemetry API layer and works with
+    data populated by scripts/populate_redis_direct.py.
+
+    Args:
+        host: Host to bind to (default: 127.0.0.1)
+        port: Port to bind to (default: 8000)
+
+    Example:
+        >>> from empathy_os.dashboard.standalone_server import run_standalone_dashboard
+        >>> run_standalone_dashboard(host="0.0.0.0", port=8080)
+    """
+    if not REDIS_AVAILABLE:
+        print("⚠️ Warning: redis-py not installed. Install with: pip install redis")
+        print("   Dashboard will start but won't show data.")
+        print()
+
+    server = HTTPServer((host, port), StandaloneDashboardHandler)
+
+    print(f"🚀 Agent Coordination Dashboard (Standalone) running at http://{host}:{port}")
+    print(f"📊 Open in browser: http://{host}:{port}")
+    print()
+    print("💡 This version reads directly from Redis")
+    print("   Populate data with: python scripts/populate_redis_direct.py")
+    print()
+    print("Press Ctrl+C to stop")
+
+    try:
+        server.serve_forever()
+    except KeyboardInterrupt:
+        print("\n\n🛑 Shutting down dashboard...")
+        server.shutdown()
+
+
+if __name__ == "__main__":
+    run_standalone_dashboard()
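
The handler above amounts to a small read-only JSON API served over stdlib HTTP. As a minimal smoke-test sketch, assuming a server already started with run_standalone_dashboard() on the default 127.0.0.1:8000, the endpoints can be exercised with nothing but urllib:

# Smoke test for the standalone dashboard API (assumes a local server
# running via run_standalone_dashboard() on the default port).
import json
from urllib.request import urlopen

BASE = "http://127.0.0.1:8000"

# /api/health reports Redis availability plus heartbeat/approval key counts
with urlopen(f"{BASE}/api/health") as resp:
    health = json.load(resp)
print(health["status"], "-", health["active_agents"], "active agents")

# Query parameters mirror the parse_qs handling in do_GET
with urlopen(f"{BASE}/api/signals?limit=10") as resp:
    signals = json.load(resp)
for sig in signals:
    print(sig["signal_type"], sig["source_agent"], "->", sig["target_agent"])

One design choice worth noting in get_redis(): the connection is cached as a class attribute, so every request shares a single client, and a failed connection resets the cache to None so the next request retries instead of crashing the server.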
empathy_os/memory/__init__.py
CHANGED

@@ -123,17 +123,24 @@ from .security import ( # Audit Logging; PII Scrubbing; Secrets Detection
     Severity,
     detect_secrets,
 )
-from .short_term import (
+from .short_term import RedisShortTermMemory
+
+# Conversation Summary Index
+from .summary_index import AgentContext, ConversationSummaryIndex
+
+# Types (extracted to types.py for cleaner separation)
+from .types import (
     AccessTier,
     AgentCredentials,
     ConflictContext,
-
+    PaginatedResult,
+    RedisConfig,
+    RedisMetrics,
     StagedPattern,
+    TimeWindowQuery,
     TTLStrategy,
 )
-
-# Conversation Summary Index
-from .summary_index import AgentContext, ConversationSummaryIndex
+from .types import SecurityError as ShortTermSecurityError
 
 # Unified memory interface
 from .unified import Environment, MemoryConfig, UnifiedMemory

@@ -180,6 +187,8 @@ __all__ = [
     "MemoryStats",
     "Node",
     "NodeType",
+    # Pagination and Query Types
+    "PaginatedResult",
     "PIIDetection",
     "PIIPattern",
     # Security - PII

@@ -187,6 +196,9 @@ __all__ = [
     "PatternMetadata",
     "PatternNode",
     "PerformanceNode",
+    # Redis Configuration and Metrics
+    "RedisConfig",
+    "RedisMetrics",
     # Short-term Memory
     "RedisShortTermMemory",
     "RedisStartMethod",

@@ -203,8 +215,10 @@ __all__ = [
     "SessionInfo",
     "SessionType",
     "Severity",
+    "ShortTermSecurityError",
     "StagedPattern",
     "TTLStrategy",
+    "TimeWindowQuery",
     # Unified Memory Interface (recommended)
     "UnifiedMemory",
     "VulnerabilityNode",
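
With these re-exports, the new pagination, query, and Redis configuration types resolve straight from empathy_os.memory rather than from its submodules. A quick sketch of the 5.0.0 import surface (the names come from the hunks above; the one-line descriptions are inferred from the names):

# New names importable from empathy_os.memory as of 5.0.0
from empathy_os.memory import (
    PaginatedResult,   # presumably paginated query results
    RedisConfig,       # presumably Redis connection configuration
    RedisMetrics,      # presumably Redis usage metrics
    TimeWindowQuery,   # presumably time-bounded queries
)

# SecurityError from types.py is re-exported under an alias, which keeps it
# distinct from other SecurityError names in the package:
from empathy_os.memory import ShortTermSecurityError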
empathy_os/memory/short_term.py
CHANGED

@@ -107,7 +107,7 @@ class RedisShortTermMemory:
     PREFIX_WORKING = "empathy:working:"
     PREFIX_STAGED = "empathy:staged:"
     PREFIX_CONFLICT = "empathy:conflict:"
-    PREFIX_COORDINATION
+    # PREFIX_COORDINATION removed in v5.0 - use empathy_os.telemetry.CoordinationSignals
     PREFIX_SESSION = "empathy:session:"
     PREFIX_PUBSUB = "empathy:pubsub:"
     PREFIX_STREAM = "empathy:stream:"

@@ -920,75 +920,9 @@ class RedisShortTermMemory:
         return True
 
     # === Coordination Signals ===
-
-    def send_signal(
-        self,
-        signal_type: str,
-        data: Any,
-        credentials: AgentCredentials,
-        target_agent: str | None = None,
-    ) -> bool:
-        """Send coordination signal to other agents
-
-        Args:
-            signal_type: Type of signal (e.g., "ready", "blocking", "complete")
-            data: Signal payload
-            credentials: Must be CONTRIBUTOR or higher
-            target_agent: Specific agent to signal (None = broadcast)
-
-        Returns:
-            True if sent
-
-        """
-        if not credentials.can_stage():
-            raise PermissionError(
-                f"Agent {credentials.agent_id} cannot send signals. "
-                "Requires CONTRIBUTOR tier or higher.",
-            )
-
-        target = target_agent or "broadcast"
-        key = f"{self.PREFIX_COORDINATION}{signal_type}:{credentials.agent_id}:{target}"
-        payload = {
-            "signal_type": signal_type,
-            "from_agent": credentials.agent_id,
-            "to_agent": target_agent,
-            "data": data,
-            "sent_at": datetime.now().isoformat(),
-        }
-        return self._set(key, json.dumps(payload), TTLStrategy.COORDINATION.value)
-
-    def receive_signals(
-        self,
-        credentials: AgentCredentials,
-        signal_type: str | None = None,
-    ) -> list[dict]:
-        """Receive coordination signals
-
-        Args:
-            credentials: Agent receiving signals
-            signal_type: Filter by signal type (optional)
-
-        Returns:
-            List of signals
-
-        """
-        if signal_type:
-            pattern = f"{self.PREFIX_COORDINATION}{signal_type}:*:{credentials.agent_id}"
-        else:
-            pattern = f"{self.PREFIX_COORDINATION}*:{credentials.agent_id}"
-
-        # Also get broadcasts
-        broadcast_pattern = f"{self.PREFIX_COORDINATION}*:*:broadcast"
-
-        keys = set(self._keys(pattern)) | set(self._keys(broadcast_pattern))
-        signals = []
-
-        for key in keys:
-            raw = self._get(key)
-            if raw:
-                signals.append(json.loads(raw))
-
-        return signals
+    # REMOVED in v5.0 - Use empathy_os.telemetry.CoordinationSignals instead
+    # - send_signal() → CoordinationSignals.signal()
+    # - receive_signals() → CoordinationSignals.get_pending_signals()
 
     # === Session Management ===
 
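
For callers still using the removed methods, the comments above name the replacements. A hedged before/after sketch follows; the replacement method names come from the migration note in the diff, while the constructor and exact call signatures of CoordinationSignals are assumptions, not confirmed API:

# Before (4.9.x, removed in 5.0):
#     memory.send_signal("ready", data={"stage": "build"}, credentials=creds)
#     signals = memory.receive_signals(credentials=creds)

# After (5.0): coordination moves to the telemetry layer.
from empathy_os.telemetry import CoordinationSignals

coordination = CoordinationSignals()  # constructor arguments assumed
coordination.signal("ready", {"stage": "build"})  # replaces send_signal()
pending = coordination.get_pending_signals()      # replaces receive_signals()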