kailash-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +31 -0
- kailash/__main__.py +11 -0
- kailash/cli/__init__.py +5 -0
- kailash/cli/commands.py +563 -0
- kailash/manifest.py +778 -0
- kailash/nodes/__init__.py +23 -0
- kailash/nodes/ai/__init__.py +26 -0
- kailash/nodes/ai/agents.py +417 -0
- kailash/nodes/ai/models.py +488 -0
- kailash/nodes/api/__init__.py +52 -0
- kailash/nodes/api/auth.py +567 -0
- kailash/nodes/api/graphql.py +480 -0
- kailash/nodes/api/http.py +598 -0
- kailash/nodes/api/rate_limiting.py +572 -0
- kailash/nodes/api/rest.py +665 -0
- kailash/nodes/base.py +1032 -0
- kailash/nodes/base_async.py +128 -0
- kailash/nodes/code/__init__.py +32 -0
- kailash/nodes/code/python.py +1021 -0
- kailash/nodes/data/__init__.py +125 -0
- kailash/nodes/data/readers.py +496 -0
- kailash/nodes/data/sharepoint_graph.py +623 -0
- kailash/nodes/data/sql.py +380 -0
- kailash/nodes/data/streaming.py +1168 -0
- kailash/nodes/data/vector_db.py +964 -0
- kailash/nodes/data/writers.py +529 -0
- kailash/nodes/logic/__init__.py +6 -0
- kailash/nodes/logic/async_operations.py +702 -0
- kailash/nodes/logic/operations.py +551 -0
- kailash/nodes/transform/__init__.py +5 -0
- kailash/nodes/transform/processors.py +379 -0
- kailash/runtime/__init__.py +6 -0
- kailash/runtime/async_local.py +356 -0
- kailash/runtime/docker.py +697 -0
- kailash/runtime/local.py +434 -0
- kailash/runtime/parallel.py +557 -0
- kailash/runtime/runner.py +110 -0
- kailash/runtime/testing.py +347 -0
- kailash/sdk_exceptions.py +307 -0
- kailash/tracking/__init__.py +7 -0
- kailash/tracking/manager.py +885 -0
- kailash/tracking/metrics_collector.py +342 -0
- kailash/tracking/models.py +535 -0
- kailash/tracking/storage/__init__.py +0 -0
- kailash/tracking/storage/base.py +113 -0
- kailash/tracking/storage/database.py +619 -0
- kailash/tracking/storage/filesystem.py +543 -0
- kailash/utils/__init__.py +0 -0
- kailash/utils/export.py +924 -0
- kailash/utils/templates.py +680 -0
- kailash/visualization/__init__.py +62 -0
- kailash/visualization/api.py +732 -0
- kailash/visualization/dashboard.py +951 -0
- kailash/visualization/performance.py +808 -0
- kailash/visualization/reports.py +1471 -0
- kailash/workflow/__init__.py +15 -0
- kailash/workflow/builder.py +245 -0
- kailash/workflow/graph.py +827 -0
- kailash/workflow/mermaid_visualizer.py +628 -0
- kailash/workflow/mock_registry.py +63 -0
- kailash/workflow/runner.py +302 -0
- kailash/workflow/state.py +238 -0
- kailash/workflow/visualization.py +588 -0
- kailash-0.1.0.dist-info/METADATA +710 -0
- kailash-0.1.0.dist-info/RECORD +69 -0
- kailash-0.1.0.dist-info/WHEEL +5 -0
- kailash-0.1.0.dist-info/entry_points.txt +2 -0
- kailash-0.1.0.dist-info/licenses/LICENSE +21 -0
- kailash-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,951 @@ kailash/visualization/dashboard.py

"""Real-time dashboard components for workflow monitoring.

This module provides comprehensive dashboard components for real-time monitoring
of workflow execution, performance metrics, and task tracking.

Design Purpose:
- Enable real-time monitoring of workflow execution progress
- Provide interactive visualizations for performance metrics
- Support both live streaming and historical analysis
- Generate embeddable dashboard components for web interfaces

Upstream Dependencies:
- TaskManager provides real-time task execution data
- MetricsCollector provides performance metrics
- PerformanceVisualizer provides static chart generation

Downstream Consumers:
- Web dashboard interfaces embed these components
- CLI tools use dashboard for real-time monitoring
- Export utilities include dashboard snapshots in reports
"""

import json
import logging
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import numpy as np

from kailash.tracking.manager import TaskManager
from kailash.tracking.models import TaskStatus
from kailash.visualization.performance import PerformanceVisualizer

logger = logging.getLogger(__name__)


@dataclass
class DashboardConfig:
    """Configuration for dashboard components.

    Attributes:
        update_interval: Seconds between dashboard updates
        max_history_points: Maximum data points to keep in memory
        auto_refresh: Whether to automatically refresh data
        show_completed: Whether to show completed tasks
        show_failed: Whether to show failed tasks
        theme: Dashboard color theme ('light' or 'dark')
    """

    update_interval: float = 1.0
    max_history_points: int = 100
    auto_refresh: bool = True
    show_completed: bool = True
    show_failed: bool = True
    theme: str = "light"


@dataclass
class LiveMetrics:
    """Container for live performance metrics.

    Attributes:
        timestamp: When metrics were collected
        active_tasks: Number of currently running tasks
        completed_tasks: Number of completed tasks
        failed_tasks: Number of failed tasks
        total_cpu_usage: System-wide CPU usage percentage
        total_memory_usage: System-wide memory usage in MB
        throughput: Tasks completed per minute
        avg_task_duration: Average task execution time
    """

    timestamp: datetime = field(default_factory=datetime.now)
    active_tasks: int = 0
    completed_tasks: int = 0
    failed_tasks: int = 0
    total_cpu_usage: float = 0.0
    total_memory_usage: float = 0.0
    throughput: float = 0.0
    avg_task_duration: float = 0.0


class RealTimeDashboard:
    """Real-time dashboard for workflow monitoring.

    This class provides comprehensive real-time monitoring capabilities
    including live metrics collection, interactive visualizations, and
    status reporting for workflow execution.

    Usage:
        dashboard = RealTimeDashboard(task_manager)
        dashboard.start_monitoring()
        # Dashboard runs in background
        dashboard.generate_live_report("output.html")
        dashboard.stop_monitoring()
    """

    def __init__(
        self, task_manager: TaskManager, config: Optional[DashboardConfig] = None
    ):
        """Initialize real-time dashboard.

        Args:
            task_manager: TaskManager instance for data access
            config: Dashboard configuration options
        """
        self.task_manager = task_manager
        self.config = config or DashboardConfig()
        self.performance_viz = PerformanceVisualizer(task_manager)

        # Live monitoring state
        self._monitoring = False
        self._monitor_thread: Optional[threading.Thread] = None
        self._metrics_history: List[LiveMetrics] = []
        self._current_run_id: Optional[str] = None

        # Event callbacks
        self._status_callbacks: List[callable] = []
        self._metrics_callbacks: List[callable] = []

        self.logger = logger

    def start_monitoring(self, run_id: Optional[str] = None):
        """Start real-time monitoring for a workflow run.

        Args:
            run_id: Specific run to monitor, or None for latest
        """
        if self._monitoring:
            self.logger.warning("Monitoring already active")
            return

        self._current_run_id = run_id
        self._monitoring = True
        self._metrics_history.clear()

        # Start monitoring thread
        self._monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True)
        self._monitor_thread.start()

        self.logger.info(f"Started monitoring for run: {run_id or 'latest'}")

    def stop_monitoring(self):
        """Stop real-time monitoring."""
        if not self._monitoring:
            return

        self._monitoring = False
        if self._monitor_thread:
            self._monitor_thread.join(timeout=5.0)

        self.logger.info("Stopped monitoring")

    def _monitor_loop(self):
        """Main monitoring loop running in background thread."""
        while self._monitoring:
            try:
                # Collect current metrics
                metrics = self._collect_live_metrics()

                # Update history
                self._metrics_history.append(metrics)
                if len(self._metrics_history) > self.config.max_history_points:
                    self._metrics_history.pop(0)

                # Trigger callbacks
                for callback in self._metrics_callbacks:
                    try:
                        callback(metrics)
                    except Exception as e:
                        self.logger.warning(f"Metrics callback failed: {e}")

                # Check for status changes
                self._check_status_changes()

                time.sleep(self.config.update_interval)

            except Exception as e:
                self.logger.error(f"Monitoring loop error: {e}")
                time.sleep(self.config.update_interval)

    def _collect_live_metrics(self) -> LiveMetrics:
        """Collect current performance metrics."""
        metrics = LiveMetrics()

        try:
            # Get current run tasks
            if self._current_run_id:
                tasks = self.task_manager.get_run_tasks(self._current_run_id)
            else:
                # Get tasks from most recent run
                recent_runs = self.task_manager.list_runs()
                if recent_runs:
                    # Get the most recent run (first in list)
                    tasks = self.task_manager.get_run_tasks(recent_runs[0].run_id)
                else:
                    tasks = []

            # Count task statuses
            metrics.active_tasks = sum(
                1 for t in tasks if t.status == TaskStatus.RUNNING
            )
            metrics.completed_tasks = sum(
                1 for t in tasks if t.status == TaskStatus.COMPLETED
            )
            metrics.failed_tasks = sum(
                1 for t in tasks if t.status == TaskStatus.FAILED
            )

            # Calculate performance metrics
            completed = [
                t for t in tasks if t.status == TaskStatus.COMPLETED and t.metrics
            ]

            if completed:
                # CPU and memory aggregation
                cpu_values = [
                    t.metrics.cpu_usage for t in completed if t.metrics.cpu_usage
                ]
                memory_values = [
                    t.metrics.memory_usage_mb
                    for t in completed
                    if t.metrics.memory_usage_mb
                ]
                duration_values = [
                    t.metrics.duration for t in completed if t.metrics.duration
                ]

                if cpu_values:
                    metrics.total_cpu_usage = np.mean(cpu_values)
                if memory_values:
                    metrics.total_memory_usage = sum(memory_values)
                if duration_values:
                    metrics.avg_task_duration = np.mean(duration_values)

            # Calculate throughput (tasks/minute)
            if len(self._metrics_history) > 1:
                prev_completed = self._metrics_history[-1].completed_tasks
                time_diff = (
                    metrics.timestamp - self._metrics_history[-1].timestamp
                ).total_seconds() / 60
                if time_diff > 0:
                    metrics.throughput = (
                        metrics.completed_tasks - prev_completed
                    ) / time_diff

        except Exception as e:
            self.logger.warning(f"Failed to collect metrics: {e}")

        return metrics

    def _check_status_changes(self):
        """Check for significant status changes and trigger callbacks."""
        if len(self._metrics_history) < 2:
            return

        current = self._metrics_history[-1]
        previous = self._metrics_history[-2]

        # Check for task completion or failure
        if current.completed_tasks > previous.completed_tasks:
            for callback in self._status_callbacks:
                try:
                    callback(
                        "task_completed",
                        current.completed_tasks - previous.completed_tasks,
                    )
                except Exception as e:
                    self.logger.warning(f"Status callback failed: {e}")

        if current.failed_tasks > previous.failed_tasks:
            for callback in self._status_callbacks:
                try:
                    callback(
                        "task_failed", current.failed_tasks - previous.failed_tasks
                    )
                except Exception as e:
                    self.logger.warning(f"Status callback failed: {e}")

    def add_metrics_callback(self, callback: callable):
        """Add callback for metrics updates.

        Args:
            callback: Function that takes LiveMetrics as argument
        """
        self._metrics_callbacks.append(callback)

    def add_status_callback(self, callback: callable):
        """Add callback for status changes.

        Args:
            callback: Function that takes (event_type, count) as arguments
        """
        self._status_callbacks.append(callback)

    def get_current_metrics(self) -> Optional[LiveMetrics]:
        """Get the most recent metrics."""
        return self._metrics_history[-1] if self._metrics_history else None

    def get_metrics_history(self, minutes: Optional[int] = None) -> List[LiveMetrics]:
        """Get metrics history for specified time period.

        Args:
            minutes: Number of minutes of history to return

        Returns:
            List of metrics within time period
        """
        if minutes is None:
            return self._metrics_history.copy()

        cutoff = datetime.now() - timedelta(minutes=minutes)
        return [m for m in self._metrics_history if m.timestamp >= cutoff]

    def generate_live_report(
        self, output_path: Union[str, Path], include_charts: bool = True
    ) -> Path:
        """Generate comprehensive live dashboard report.

        Args:
            output_path: Path to save HTML dashboard
            include_charts: Whether to include performance charts

        Returns:
            Path to generated dashboard file
        """
        output_path = Path(output_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate dashboard HTML
        html_content = self._generate_dashboard_html(include_charts)

        with open(output_path, "w") as f:
            f.write(html_content)

        self.logger.info(f"Generated live dashboard: {output_path}")
        return output_path

    def _generate_dashboard_html(self, include_charts: bool = True) -> str:
        """Generate HTML content for dashboard."""
        current_metrics = self.get_current_metrics()
        metrics_history = self.get_metrics_history(minutes=30)  # Last 30 minutes

        # Basic status info
        status_section = self._generate_status_section(current_metrics)

        # Live metrics section
        live_metrics_section = self._generate_live_metrics_section(metrics_history)

        # Charts section (if requested)
        charts_section = ""
        if include_charts and self._current_run_id:
            charts_section = self._generate_charts_section()

        # Task list section
        task_list_section = self._generate_task_list_section()

        # Combine all sections
        html_template = f"""
<!DOCTYPE html>
<html>
<head>
    <title>Real-time Workflow Dashboard</title>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <style>
        {self._get_dashboard_css()}
    </style>
    <script>
        {self._get_dashboard_javascript()}
    </script>
</head>
<body>
    <div class="dashboard-container">
        <header class="dashboard-header">
            <h1>🚀 Workflow Dashboard</h1>
            <div class="status-indicator">
                <span class="{'status-active' if self._monitoring else 'status-inactive'}">
                    {'🟢 Live Monitoring' if self._monitoring else '🔴 Monitoring Stopped'}
                </span>
            </div>
        </header>

        {status_section}
        {live_metrics_section}
        {charts_section}
        {task_list_section}
    </div>
</body>
</html>
        """

        return html_template

    def _generate_status_section(self, metrics: Optional[LiveMetrics]) -> str:
        """Generate status overview section."""
        if not metrics:
            return """
            <section class="status-section">
                <h2>📊 Current Status</h2>
                <div class="status-grid">
                    <div class="status-card">
                        <span class="status-label">No Data Available</span>
                    </div>
                </div>
            </section>
            """

        return f"""
        <section class="status-section">
            <h2>📊 Current Status</h2>
            <div class="status-grid">
                <div class="status-card">
                    <span class="status-value">{metrics.active_tasks}</span>
                    <span class="status-label">Active Tasks</span>
                </div>
                <div class="status-card">
                    <span class="status-value">{metrics.completed_tasks}</span>
                    <span class="status-label">Completed</span>
                </div>
                <div class="status-card">
                    <span class="status-value">{metrics.failed_tasks}</span>
                    <span class="status-label">Failed</span>
                </div>
                <div class="status-card">
                    <span class="status-value">{metrics.throughput:.1f}</span>
                    <span class="status-label">Tasks/Min</span>
                </div>
                <div class="status-card">
                    <span class="status-value">{metrics.total_cpu_usage:.1f}%</span>
                    <span class="status-label">Avg CPU</span>
                </div>
                <div class="status-card">
                    <span class="status-value">{metrics.total_memory_usage:.0f}MB</span>
                    <span class="status-label">Total Memory</span>
                </div>
            </div>
        </section>
        """

    def _generate_live_metrics_section(self, history: List[LiveMetrics]) -> str:
        """Generate live metrics charts section."""
        if not history:
            return """
            <section class="metrics-section">
                <h2>📈 Live Metrics</h2>
                <p>No metrics data available</p>
            </section>
            """

        # Prepare data for charts
        timestamps = [m.timestamp.strftime("%H:%M:%S") for m in history]
        cpu_data = [m.total_cpu_usage for m in history]
        memory_data = [m.total_memory_usage for m in history]
        throughput_data = [m.throughput for m in history]

        return f"""
        <section class="metrics-section">
            <h2>📈 Live Metrics (Last 30 minutes)</h2>
            <div class="charts-grid">
                <div class="chart-container">
                    <h3>CPU Usage</h3>
                    <canvas id="cpuChart"></canvas>
                </div>
                <div class="chart-container">
                    <h3>Memory Usage</h3>
                    <canvas id="memoryChart"></canvas>
                </div>
                <div class="chart-container">
                    <h3>Throughput</h3>
                    <canvas id="throughputChart"></canvas>
                </div>
            </div>
            <script>
                drawLiveCharts({json.dumps(timestamps)}, {json.dumps(cpu_data)},
                               {json.dumps(memory_data)}, {json.dumps(throughput_data)});
            </script>
        </section>
        """

    def _generate_charts_section(self) -> str:
        """Generate performance charts section."""
        if not self._current_run_id:
            return ""

        return f"""
        <section class="charts-section">
            <h2>📊 Performance Analysis</h2>
            <div class="charts-grid">
                <div class="chart-item">
                    <h3>Execution Timeline</h3>
                    <img src="timeline_{self._current_run_id}.png" alt="Timeline" class="chart-image">
                </div>
                <div class="chart-item">
                    <h3>Resource Usage</h3>
                    <img src="resources_{self._current_run_id}.png" alt="Resources" class="chart-image">
                </div>
                <div class="chart-item">
                    <h3>Performance Heatmap</h3>
                    <img src="heatmap_{self._current_run_id}.png" alt="Heatmap" class="chart-image">
                </div>
            </div>
        </section>
        """

    def _generate_task_list_section(self) -> str:
        """Generate task list section."""
        if not self._current_run_id:
            return """
            <section class="tasks-section">
                <h2>📋 Recent Tasks</h2>
                <p>No active workflow</p>
            </section>
            """

        tasks = self.task_manager.get_run_tasks(self._current_run_id)
        recent_tasks = sorted(
            tasks, key=lambda t: t.started_at or datetime.min, reverse=True
        )[:10]

        task_rows = ""
        for task in recent_tasks:
            status_class = {
                TaskStatus.RUNNING: "status-running",
                TaskStatus.COMPLETED: "status-completed",
                TaskStatus.FAILED: "status-failed",
                TaskStatus.PENDING: "status-pending",
            }.get(task.status, "status-unknown")

            duration = ""
            if task.metrics and task.metrics.duration:
                duration = f"{task.metrics.duration:.2f}s"

            task_rows += f"""
            <tr class="{status_class}">
                <td>{task.node_id}</td>
                <td>{task.node_type}</td>
                <td><span class="status-badge {status_class}">{task.status}</span></td>
                <td>{duration}</td>
                <td>{task.started_at.strftime('%H:%M:%S') if task.started_at else 'N/A'}</td>
            </tr>
            """

        return f"""
        <section class="tasks-section">
            <h2>📋 Recent Tasks</h2>
            <table class="tasks-table">
                <thead>
                    <tr>
                        <th>Node ID</th>
                        <th>Type</th>
                        <th>Status</th>
                        <th>Duration</th>
                        <th>Started</th>
                    </tr>
                </thead>
                <tbody>
                    {task_rows}
                </tbody>
            </table>
        </section>
        """

    def _get_dashboard_css(self) -> str:
        """Get CSS styles for dashboard."""
        theme_colors = {
            "light": {
                "bg": "#f8f9fa",
                "card_bg": "#ffffff",
                "text": "#333333",
                "border": "#e9ecef",
                "primary": "#007bff",
                "success": "#28a745",
                "danger": "#dc3545",
                "warning": "#ffc107",
            },
            "dark": {
                "bg": "#121212",
                "card_bg": "#1e1e1e",
                "text": "#ffffff",
                "border": "#333333",
                "primary": "#1976d2",
                "success": "#4caf50",
                "danger": "#f44336",
                "warning": "#ff9800",
            },
        }

        colors = theme_colors[self.config.theme]

        return f"""
        * {{
            margin: 0;
            padding: 0;
            box-sizing: border-box;
        }}

        body {{
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            background-color: {colors['bg']};
            color: {colors['text']};
            line-height: 1.6;
        }}

        .dashboard-container {{
            max-width: 1400px;
            margin: 0 auto;
            padding: 20px;
        }}

        .dashboard-header {{
            display: flex;
            justify-content: space-between;
            align-items: center;
            margin-bottom: 30px;
            padding: 20px;
            background: {colors['card_bg']};
            border-radius: 8px;
            border: 1px solid {colors['border']};
        }}

        .dashboard-header h1 {{
            color: {colors['primary']};
            font-size: 2em;
        }}

        .status-indicator {{
            font-weight: bold;
        }}

        .status-active {{
            color: {colors['success']};
        }}

        .status-inactive {{
            color: {colors['danger']};
        }}

        section {{
            margin-bottom: 30px;
            padding: 20px;
            background: {colors['card_bg']};
            border-radius: 8px;
            border: 1px solid {colors['border']};
        }}

        section h2 {{
            margin-bottom: 20px;
            color: {colors['primary']};
            font-size: 1.5em;
        }}

        .status-grid {{
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
            gap: 15px;
        }}

        .status-card {{
            text-align: center;
            padding: 20px;
            background: {colors['bg']};
            border-radius: 6px;
            border: 1px solid {colors['border']};
        }}

        .status-value {{
            display: block;
            font-size: 2em;
            font-weight: bold;
            color: {colors['primary']};
        }}

        .status-label {{
            display: block;
            font-size: 0.9em;
            color: {colors['text']};
            opacity: 0.8;
        }}

        .charts-grid {{
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(400px, 1fr));
            gap: 20px;
        }}

        .chart-container, .chart-item {{
            padding: 15px;
            background: {colors['bg']};
            border-radius: 6px;
            border: 1px solid {colors['border']};
        }}

        .chart-container h3, .chart-item h3 {{
            margin-bottom: 10px;
            color: {colors['text']};
        }}

        .chart-image {{
            max-width: 100%;
            height: auto;
            border-radius: 4px;
        }}

        .tasks-table {{
            width: 100%;
            border-collapse: collapse;
        }}

        .tasks-table th,
        .tasks-table td {{
            padding: 12px;
            text-align: left;
            border-bottom: 1px solid {colors['border']};
        }}

        .tasks-table th {{
            background: {colors['bg']};
            font-weight: bold;
        }}

        .status-badge {{
            padding: 4px 8px;
            border-radius: 4px;
            font-size: 0.8em;
            font-weight: bold;
            text-transform: uppercase;
        }}

        .status-running {{
            background: {colors['primary']};
            color: white;
        }}

        .status-completed {{
            background: {colors['success']};
            color: white;
        }}

        .status-failed {{
            background: {colors['danger']};
            color: white;
        }}

        .status-pending {{
            background: {colors['warning']};
            color: black;
        }}

        canvas {{
            max-width: 100%;
            height: 200px;
        }}
        """

    def _get_dashboard_javascript(self) -> str:
        """Get JavaScript for dashboard functionality."""
        return """
        function drawLiveCharts(timestamps, cpuData, memoryData, throughputData) {
            // Simple canvas-based charts (replace with Chart.js or similar for production)
            drawSimpleChart('cpuChart', timestamps, cpuData, 'CPU %', '#007bff');
            drawSimpleChart('memoryChart', timestamps, memoryData, 'Memory MB', '#28a745');
            drawSimpleChart('throughputChart', timestamps, throughputData, 'Tasks/Min', '#ffc107');
        }

        function drawSimpleChart(canvasId, labels, data, label, color) {
            const canvas = document.getElementById(canvasId);
            if (!canvas) return;

            const ctx = canvas.getContext('2d');
            const width = canvas.width = canvas.offsetWidth;
            const height = canvas.height = 200;

            // Clear canvas
            ctx.clearRect(0, 0, width, height);

            if (data.length === 0) {
                ctx.fillStyle = '#666';
                ctx.font = '14px Arial';
                ctx.textAlign = 'center';
                ctx.fillText('No data available', width/2, height/2);
                return;
            }

            // Calculate scales
            const maxValue = Math.max(...data, 1);
            const padding = 40;
            const chartWidth = width - 2 * padding;
            const chartHeight = height - 2 * padding;

            // Draw axes
            ctx.strokeStyle = '#ddd';
            ctx.lineWidth = 1;
            ctx.beginPath();
            ctx.moveTo(padding, padding);
            ctx.lineTo(padding, height - padding);
            ctx.lineTo(width - padding, height - padding);
            ctx.stroke();

            // Draw data line
            if (data.length > 1) {
                ctx.strokeStyle = color;
                ctx.lineWidth = 2;
                ctx.beginPath();

                for (let i = 0; i < data.length; i++) {
                    const x = padding + (i / (data.length - 1)) * chartWidth;
                    const y = height - padding - (data[i] / maxValue) * chartHeight;

                    if (i === 0) {
                        ctx.moveTo(x, y);
                    } else {
                        ctx.lineTo(x, y);
                    }
                }
                ctx.stroke();

                // Draw data points
                ctx.fillStyle = color;
                for (let i = 0; i < data.length; i++) {
                    const x = padding + (i / (data.length - 1)) * chartWidth;
                    const y = height - padding - (data[i] / maxValue) * chartHeight;

                    ctx.beginPath();
                    ctx.arc(x, y, 3, 0, 2 * Math.PI);
                    ctx.fill();
                }
            }

            // Draw labels
            ctx.fillStyle = '#666';
            ctx.font = '12px Arial';
            ctx.textAlign = 'center';
            ctx.fillText(`Max: ${maxValue.toFixed(1)}`, width - 60, padding + 15);
            ctx.fillText('0', padding - 20, height - padding + 5);
        }

        // Auto-refresh functionality
        setInterval(function() {
            if (window.location.hash !== '#no-refresh') {
                window.location.reload();
            }
        }, 30000);  // Refresh every 30 seconds
        """


class DashboardExporter:
    """Utility class for exporting dashboard data and reports."""

    def __init__(self, dashboard: RealTimeDashboard):
        """Initialize dashboard exporter.

        Args:
            dashboard: RealTimeDashboard instance
        """
        self.dashboard = dashboard
        self.logger = logger

    def export_metrics_json(self, output_path: Union[str, Path]) -> Path:
        """Export current metrics as JSON.

        Args:
            output_path: Path to save JSON file

        Returns:
            Path to exported file
        """
        output_path = Path(output_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Get current data
        current_metrics = self.dashboard.get_current_metrics()
        metrics_history = self.dashboard.get_metrics_history()

        data = {
            "timestamp": datetime.now().isoformat(),
            "current_metrics": (
                self._metrics_to_dict(current_metrics) if current_metrics else None
            ),
            "history": [self._metrics_to_dict(m) for m in metrics_history],
            "config": {
                "update_interval": self.dashboard.config.update_interval,
                "theme": self.dashboard.config.theme,
                "monitoring_active": self.dashboard._monitoring,
            },
        }

        with open(output_path, "w") as f:
            json.dump(data, f, indent=2, default=str)

        self.logger.info(f"Exported metrics to: {output_path}")
        return output_path

    def _metrics_to_dict(self, metrics: LiveMetrics) -> Dict[str, Any]:
        """Convert LiveMetrics to dictionary."""
        return {
            "timestamp": metrics.timestamp.isoformat(),
            "active_tasks": metrics.active_tasks,
            "completed_tasks": metrics.completed_tasks,
            "failed_tasks": metrics.failed_tasks,
            "total_cpu_usage": metrics.total_cpu_usage,
            "total_memory_usage": metrics.total_memory_usage,
            "throughput": metrics.throughput,
            "avg_task_duration": metrics.avg_task_duration,
        }

    def create_dashboard_snapshot(
        self, output_dir: Union[str, Path], include_static_charts: bool = True
    ) -> Dict[str, Path]:
        """Create complete dashboard snapshot with all assets.

        Args:
            output_dir: Directory to save snapshot
            include_static_charts: Whether to generate static charts

        Returns:
            Dictionary mapping asset names to file paths
        """
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        assets = {}

        # Generate live dashboard HTML
        dashboard_path = output_dir / "dashboard.html"
        self.dashboard.generate_live_report(dashboard_path)
        assets["dashboard"] = dashboard_path

        # Export metrics JSON
        metrics_path = output_dir / "metrics.json"
        self.export_metrics_json(metrics_path)
        assets["metrics"] = metrics_path

        # Generate static performance charts if requested
        if include_static_charts and self.dashboard._current_run_id:
            try:
                chart_outputs = (
                    self.dashboard.performance_viz.create_run_performance_summary(
                        self.dashboard._current_run_id, output_dir
                    )
                )
                assets.update(chart_outputs)
            except Exception as e:
                self.logger.warning(f"Failed to generate static charts: {e}")

        self.logger.info(f"Created dashboard snapshot in: {output_dir}")
        return assets