kailash 0.1.5__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/access_control.py +740 -0
- kailash/api/__main__.py +6 -0
- kailash/api/auth.py +668 -0
- kailash/api/custom_nodes.py +285 -0
- kailash/api/custom_nodes_secure.py +377 -0
- kailash/api/database.py +620 -0
- kailash/api/studio.py +915 -0
- kailash/api/studio_secure.py +893 -0
- kailash/mcp/__init__.py +53 -0
- kailash/mcp/__main__.py +13 -0
- kailash/mcp/ai_registry_server.py +712 -0
- kailash/mcp/client.py +447 -0
- kailash/mcp/client_new.py +334 -0
- kailash/mcp/server.py +293 -0
- kailash/mcp/server_new.py +336 -0
- kailash/mcp/servers/__init__.py +12 -0
- kailash/mcp/servers/ai_registry.py +289 -0
- kailash/nodes/__init__.py +4 -2
- kailash/nodes/ai/__init__.py +2 -0
- kailash/nodes/ai/a2a.py +714 -67
- kailash/nodes/ai/intelligent_agent_orchestrator.py +31 -37
- kailash/nodes/ai/iterative_llm_agent.py +1280 -0
- kailash/nodes/ai/llm_agent.py +324 -1
- kailash/nodes/ai/self_organizing.py +5 -6
- kailash/nodes/base.py +15 -2
- kailash/nodes/base_async.py +45 -0
- kailash/nodes/base_cycle_aware.py +374 -0
- kailash/nodes/base_with_acl.py +338 -0
- kailash/nodes/code/python.py +135 -27
- kailash/nodes/data/__init__.py +1 -2
- kailash/nodes/data/readers.py +16 -6
- kailash/nodes/data/sql.py +699 -256
- kailash/nodes/data/writers.py +16 -6
- kailash/nodes/logic/__init__.py +8 -0
- kailash/nodes/logic/convergence.py +642 -0
- kailash/nodes/logic/loop.py +153 -0
- kailash/nodes/logic/operations.py +187 -27
- kailash/nodes/mixins/__init__.py +11 -0
- kailash/nodes/mixins/mcp.py +228 -0
- kailash/nodes/mixins.py +387 -0
- kailash/runtime/__init__.py +2 -1
- kailash/runtime/access_controlled.py +458 -0
- kailash/runtime/local.py +106 -33
- kailash/runtime/parallel_cyclic.py +529 -0
- kailash/sdk_exceptions.py +90 -5
- kailash/security.py +845 -0
- kailash/tracking/manager.py +38 -15
- kailash/tracking/models.py +1 -1
- kailash/tracking/storage/filesystem.py +30 -2
- kailash/utils/__init__.py +8 -0
- kailash/workflow/__init__.py +18 -0
- kailash/workflow/convergence.py +270 -0
- kailash/workflow/cycle_analyzer.py +889 -0
- kailash/workflow/cycle_builder.py +579 -0
- kailash/workflow/cycle_config.py +725 -0
- kailash/workflow/cycle_debugger.py +860 -0
- kailash/workflow/cycle_exceptions.py +615 -0
- kailash/workflow/cycle_profiler.py +741 -0
- kailash/workflow/cycle_state.py +338 -0
- kailash/workflow/cyclic_runner.py +985 -0
- kailash/workflow/graph.py +500 -39
- kailash/workflow/migration.py +809 -0
- kailash/workflow/safety.py +365 -0
- kailash/workflow/templates.py +763 -0
- kailash/workflow/validation.py +751 -0
- {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/METADATA +259 -12
- kailash-0.2.1.dist-info/RECORD +125 -0
- kailash/nodes/mcp/__init__.py +0 -11
- kailash/nodes/mcp/client.py +0 -554
- kailash/nodes/mcp/resource.py +0 -682
- kailash/nodes/mcp/server.py +0 -577
- kailash-0.1.5.dist-info/RECORD +0 -88
- {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/WHEEL +0 -0
- {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/entry_points.txt +0 -0
- {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,889 @@
|
|
1
|
+
"""
|
2
|
+
Cycle Analysis and Performance Monitoring for Cyclic Workflows.
|
3
|
+
|
4
|
+
This module provides comprehensive analysis tools that combine debugging and
|
5
|
+
profiling capabilities to deliver deep insights into cycle behavior, performance
|
6
|
+
characteristics, and optimization opportunities. It serves as the primary
|
7
|
+
analysis interface for understanding and improving cyclic workflow execution.
|
8
|
+
|
9
|
+
Examples:
|
10
|
+
Comprehensive cycle analysis:
|
11
|
+
|
12
|
+
>>> analyzer = CycleAnalyzer(
|
13
|
+
... analysis_level="comprehensive",
|
14
|
+
... output_directory="./analysis_results"
|
15
|
+
... )
|
16
|
+
>>> # Start analysis session
|
17
|
+
>>> session = analyzer.start_analysis_session("optimization_study")
|
18
|
+
>>> # Analyze cycle execution
|
19
|
+
>>> trace = analyzer.start_cycle_analysis("opt_cycle", "workflow_1")
|
20
|
+
>>> analyzer.track_iteration(trace, input_data, output_data, 0.05)
|
21
|
+
>>> analyzer.complete_cycle_analysis(trace, True, "convergence")
|
22
|
+
>>> # Generate comprehensive report
|
23
|
+
>>> report = analyzer.generate_session_report()
|
24
|
+
>>> analyzer.export_analysis_data("analysis_results.json")
|
25
|
+
|
26
|
+
Real-time monitoring:
|
27
|
+
|
28
|
+
>>> # Monitor active cycle
|
29
|
+
>>> metrics = analyzer.get_real_time_metrics(trace)
|
30
|
+
>>> if metrics['health_score'] < 0.5:
|
31
|
+
... print("Performance issue detected!")
|
32
|
+
... print(f"Alerts: {metrics['alerts']}")
|
33
|
+
"""
|
34
|
+
|
35
|
+
import json
|
36
|
+
import logging
|
37
|
+
from datetime import datetime
|
38
|
+
from pathlib import Path
|
39
|
+
from typing import Any, Dict, List, Optional
|
40
|
+
|
41
|
+
from kailash.workflow.cycle_debugger import CycleDebugger, CycleExecutionTrace
|
42
|
+
from kailash.workflow.cycle_profiler import CycleProfiler
|
43
|
+
|
44
|
+
logger = logging.getLogger(__name__)
|
45
|
+
|
46
|
+
|
47
|
+
class CycleAnalyzer:
|
48
|
+
"""
|
49
|
+
Comprehensive analysis tool combining debugging and profiling capabilities.
|
50
|
+
|
51
|
+
This class provides a unified interface for cycle analysis, combining
|
52
|
+
the detailed tracking capabilities of CycleDebugger with the performance
|
53
|
+
insights of CycleProfiler to provide comprehensive cycle optimization
|
54
|
+
guidance and health monitoring.
|
55
|
+
|
56
|
+
Examples:
|
57
|
+
>>> analyzer = CycleAnalyzer(analysis_level="comprehensive")
|
58
|
+
>>> # Start analysis
|
59
|
+
>>> session = analyzer.start_analysis_session("optimization_study")
|
60
|
+
>>> trace = analyzer.start_cycle_analysis("cycle_1", "workflow_1")
|
61
|
+
>>> # During execution...
|
62
|
+
>>> analyzer.track_iteration(trace, input_data, output_data)
|
63
|
+
>>> # Complete analysis
|
64
|
+
>>> analyzer.complete_cycle_analysis(trace, converged=True)
|
65
|
+
>>> report = analyzer.generate_comprehensive_report(session)
|
66
|
+
"""
|
67
|
+
|
68
|
+
def __init__(
    self,
    analysis_level: str = "standard",
    enable_profiling: bool = True,
    enable_debugging: bool = True,
    output_directory: Optional[str] = None,
):
    """
    Initialize the cycle analyzer.

    Args:
        analysis_level: Depth of analysis ("basic", "standard", "comprehensive").
        enable_profiling: Whether performance profiling is collected.
        enable_debugging: Whether detailed per-iteration debugging is collected.
        output_directory: Optional directory where reports/exports are written.
    """
    self.analysis_level = analysis_level
    self.enable_profiling = enable_profiling
    self.enable_debugging = enable_debugging
    self.output_directory = Path(output_directory) if output_directory else None

    # Translate the analyzer level onto the debugger's verbosity scale;
    # unknown levels fall back to "detailed".
    level_to_debug = {
        "basic": "basic",
        "standard": "detailed",
        "comprehensive": "verbose",
    }
    debug_level = level_to_debug.get(analysis_level, "detailed")

    if enable_debugging:
        self.debugger = CycleDebugger(
            debug_level=debug_level, enable_profiling=enable_profiling
        )
    else:
        self.debugger = None

    if enable_profiling:
        # Advanced metrics only pay off in comprehensive mode.
        self.profiler = CycleProfiler(
            enable_advanced_metrics=(analysis_level == "comprehensive")
        )
    else:
        self.profiler = None

    # Session bookkeeping: the current session id, its traces, and a
    # history of past analyses.
    self.current_session: Optional[str] = None
    self.session_traces: List[CycleExecutionTrace] = []
    self.analysis_history: List[Dict[str, Any]] = []

    # Prepare the output directory eagerly so later exports cannot fail
    # on a missing path.
    if self.output_directory:
        self.output_directory.mkdir(parents=True, exist_ok=True)
        logger.info(f"Analysis output directory: {self.output_directory}")
|
117
|
+
|
118
|
+
def start_analysis_session(self, session_id: str) -> str:
    """
    Begin a new analysis session that groups related cycle executions.

    A session collects the trace of every cycle analyzed until the next
    session is started, which enables comparative analysis and combined
    reporting across related workflow runs.

    Args:
        session_id: Unique identifier for the analysis session.

    Returns:
        The same session ID, for reference/chaining.

    Examples:
        >>> session = analyzer.start_analysis_session("optimization_experiment_1")
    """
    # Starting a session discards any traces held for the previous one.
    self.current_session = session_id
    self.session_traces = []

    logger.info(f"Started analysis session: {session_id}")
    return session_id
|
140
|
+
|
141
|
+
def start_cycle_analysis(
    self,
    cycle_id: str,
    workflow_id: str,
    max_iterations: Optional[int] = None,
    timeout: Optional[float] = None,
    convergence_condition: Optional[str] = None,
) -> Optional[CycleExecutionTrace]:
    """
    Begin tracking a new cycle execution.

    Opens a debugger trace (and, transitively, profiling data when
    enabled) for the cycle so iterations can be recorded as they run.

    Args:
        cycle_id: Unique identifier for the cycle.
        workflow_id: Parent workflow identifier.
        max_iterations: Configured iteration limit, if any.
        timeout: Configured timeout limit, if any.
        convergence_condition: Convergence condition expression, if any.

    Returns:
        A trace object to pass to track_iteration / complete_cycle_analysis,
        or None when debugging is disabled.

    Examples:
        >>> trace = analyzer.start_cycle_analysis("opt_cycle", "workflow_1", max_iterations=100)
    """
    # Without a debugger there is nothing to record against.
    if not self.debugger:
        logger.warning("Debugging not enabled - cannot create trace")
        return None

    trace = self.debugger.start_cycle(
        cycle_id=cycle_id,
        workflow_id=workflow_id,
        max_iterations=max_iterations,
        timeout=timeout,
        convergence_condition=convergence_condition,
    )

    logger.info(
        f"Started cycle analysis for '{cycle_id}' in session '{self.current_session}'"
    )
    return trace
|
185
|
+
|
186
|
+
def track_iteration(
    self,
    trace: CycleExecutionTrace,
    input_data: Dict[str, Any],
    output_data: Dict[str, Any],
    convergence_value: Optional[float] = None,
    node_executions: Optional[List[str]] = None,
):
    """
    Record one cycle iteration with its input and output data.

    Delegates to the debugger, which captures timing, resource usage
    and convergence information for the iteration. No-op when debugging
    is disabled.

    Args:
        trace: Active trace object for the cycle.
        input_data: Data fed into this iteration.
        output_data: Data produced by this iteration.
        convergence_value: Convergence metric, when one is available.
        node_executions: Names of the nodes that executed.

    Examples:
        >>> analyzer.track_iteration(trace, input_data, output_data, convergence_value=0.05)
    """
    debugger = self.debugger
    if not debugger:
        return

    record = debugger.start_iteration(trace, input_data)
    debugger.end_iteration(
        trace, record, output_data, convergence_value, node_executions
    )

    # Per-iteration logging is noisy; only emit it in the most detailed mode.
    if self.analysis_level == "comprehensive":
        logger.debug(
            f"Tracked iteration {record.iteration_number} for cycle '{trace.cycle_id}' "
            f"with convergence={convergence_value}"
        )
|
224
|
+
|
225
|
+
def complete_cycle_analysis(
    self,
    trace: CycleExecutionTrace,
    converged: bool,
    termination_reason: str,
    convergence_iteration: Optional[int] = None,
):
    """
    Finish tracking a cycle and fold its trace into the session.

    Closes the debugger trace, hands the completed trace to the profiler
    for performance analysis, appends it to the session history, and in
    comprehensive mode logs immediate insights. No-op when debugging is
    disabled.

    Args:
        trace: Cycle trace to finalize.
        converged: Whether the cycle converged successfully.
        termination_reason: Why the cycle terminated.
        convergence_iteration: Iteration at which convergence occurred, if known.

    Examples:
        >>> analyzer.complete_cycle_analysis(trace, converged=True, termination_reason="convergence")
    """
    if not self.debugger:
        return

    # Finalize debugger bookkeeping for this cycle.
    self.debugger.end_cycle(
        trace, converged, termination_reason, convergence_iteration
    )

    # Make the completed trace available for performance analysis.
    if self.profiler:
        self.profiler.add_trace(trace)

    # Keep it in the session so session-level reports can see it.
    self.session_traces.append(trace)

    logger.info(
        f"Completed cycle analysis for '{trace.cycle_id}' - "
        f"converged={converged}, iterations={len(trace.iterations)}"
    )

    # In comprehensive mode surface headline findings right away.
    if self.analysis_level == "comprehensive":
        self._generate_immediate_insights(trace)
|
271
|
+
|
272
|
+
def generate_cycle_report(self, trace: CycleExecutionTrace) -> Dict[str, Any]:
    """
    Build a detailed analysis report for one completed cycle.

    The report combines debugging output, performance metrics computed
    by a throwaway single-trace profiler, optimization recommendations,
    and (in comprehensive mode) advanced statistical analysis. When an
    output directory is configured the report is also written to disk.

    Args:
        trace: Completed cycle trace.

    Returns:
        Comprehensive cycle analysis report.

    Examples:
        >>> report = analyzer.generate_cycle_report(trace)
        >>> print(f"Cycle efficiency: {report['performance']['efficiency_score']}")
    """
    report: Dict[str, Any] = {
        "analysis_info": {
            "cycle_id": trace.cycle_id,
            "workflow_id": trace.workflow_id,
            "analysis_level": self.analysis_level,
            "session_id": self.current_session,
            "generated_at": datetime.now().isoformat(),
        }
    }

    # Debugging section, when a debugger is active.
    if self.debugger:
        report["debugging"] = self.debugger.generate_report(trace)

    # Performance section: use a fresh profiler so metrics reflect only
    # this trace, not the whole session.
    if self.profiler:
        solo = CycleProfiler(
            enable_advanced_metrics=(self.analysis_level == "comprehensive")
        )
        solo.add_trace(trace)

        metrics = solo.analyze_performance()
        report["performance"] = metrics.to_dict()
        report["recommendations"] = solo.get_optimization_recommendations(trace)

    # Deep statistical analysis only in comprehensive mode.
    if self.analysis_level == "comprehensive":
        report["advanced_analysis"] = self._generate_advanced_analysis(trace)

    # Persist the report alongside other analysis artifacts if configured.
    if self.output_directory:
        self._export_cycle_report(report, trace.cycle_id)

    return report
|
328
|
+
|
329
|
+
def generate_session_report(
    self, session_id: Optional[str] = None
) -> Dict[str, Any]:
    """
    Generate a comprehensive report for an analysis session.

    Covers all cycles in the session: summary statistics, profiler
    performance analysis, comparative analysis (when two or more cycles
    exist), and cross-cycle insights. Writes the report to the output
    directory when one is configured.

    Args:
        session_id: Session to analyze, or the current session if None.

    Returns:
        Comprehensive session analysis report. Contains a "warning" key
        when no traces are available for the requested session.

    Examples:
        >>> report = analyzer.generate_session_report()
        >>> print(f"Best cycle: {report['comparative_analysis']['best_cycle']}")
    """
    target_session = session_id or self.current_session
    # BUG FIX: previously any explicitly passed session_id (even the
    # current one) yielded an empty trace list, making the report
    # useless. Only the current session's traces are retained in memory,
    # so use them whenever the requested session is current/unspecified.
    if session_id is None or session_id == self.current_session:
        traces_to_analyze = self.session_traces
    else:
        traces_to_analyze = []

    report: Dict[str, Any] = {
        "session_info": {
            "session_id": target_session,
            "analysis_level": self.analysis_level,
            "cycles_analyzed": len(traces_to_analyze),
            "generated_at": datetime.now().isoformat(),
        },
        "summary": {
            "total_cycles": len(traces_to_analyze),
            "total_iterations": sum(
                len(trace.iterations) for trace in traces_to_analyze
            ),
            "convergence_rate": (
                len([t for t in traces_to_analyze if t.converged])
                / len(traces_to_analyze)
                if traces_to_analyze
                else 0
            ),
            "avg_cycle_time": (
                sum(t.total_execution_time or 0 for t in traces_to_analyze)
                / len(traces_to_analyze)
                if traces_to_analyze
                else 0
            ),
        },
    }

    # Nothing to analyze: return early with an explicit warning.
    if not traces_to_analyze:
        report["warning"] = "No traces available for analysis"
        return report

    # Profiler-backed performance analysis across the whole session.
    if self.profiler:
        # Make sure every session trace has been fed to the profiler.
        for trace in traces_to_analyze:
            if trace not in self.profiler.traces:
                self.profiler.add_trace(trace)

        report["performance_analysis"] = self.profiler.generate_performance_report()

    # Comparative analysis needs at least two cycles to compare.
    if len(traces_to_analyze) >= 2:
        cycle_ids = [trace.cycle_id for trace in traces_to_analyze]
        report["comparative_analysis"] = (
            self.profiler.compare_cycles(cycle_ids) if self.profiler else {}
        )

    # Cross-cycle insights (best/worst cycle, consistency, quality).
    report["insights"] = self._generate_session_insights(traces_to_analyze)

    # Persist the report if an output directory is configured.
    if self.output_directory:
        self._export_session_report(report, target_session)

    return report
|
409
|
+
|
410
|
+
def get_real_time_metrics(self, trace: CycleExecutionTrace) -> Dict[str, Any]:
    """
    Get real-time metrics for an active cycle.

    Computes current performance indicators over the last five
    iterations (average iteration time, memory trend, convergence
    trend), an aggregate health score, and any alerts, enabling live
    monitoring and early intervention.

    Args:
        trace: Active cycle trace.

    Returns:
        Dict[str, Any]: Real-time metrics and health indicators. For a
        trace with no iterations yet, {"status": "no_iterations",
        "health_score": 0.5}.

    Side Effects:
        None - this is a pure analysis method.

    Example:
        >>> metrics = analyzer.get_real_time_metrics(trace)
        >>> if metrics['health_score'] < 0.5:
        ...     print("Cycle performance issue detected!")
    """
    if not trace.iterations:
        return {"status": "no_iterations", "health_score": 0.5}

    recent_iterations = trace.iterations[-5:]  # Last 5 iterations

    # Average execution time across the recent window.
    avg_recent_time = sum(
        it.execution_time or 0 for it in recent_iterations
    ) / len(recent_iterations)

    # Memory trend: +/-20% movement over the window counts as a trend.
    # The truthy filter drops missing readings (a 0 MB sample carries no
    # information and would also break ratio-based checks downstream).
    memory_values = [
        it.memory_usage_mb for it in recent_iterations if it.memory_usage_mb
    ]
    memory_trend = "stable"
    if len(memory_values) >= 2:
        if memory_values[-1] > memory_values[0] * 1.2:
            memory_trend = "increasing"
        elif memory_values[-1] < memory_values[0] * 0.8:
            memory_trend = "decreasing"

    # Convergence trend: lower values mean closer to convergence.
    # BUG FIX: compare against None explicitly - the previous truthy
    # filter discarded a legitimate convergence value of exactly 0.0
    # (i.e. perfect convergence).
    convergence_values = [
        it.convergence_value
        for it in recent_iterations
        if it.convergence_value is not None
    ]
    convergence_trend = "unknown"
    if len(convergence_values) >= 2:
        if convergence_values[-1] < convergence_values[0]:
            convergence_trend = "improving"
        elif convergence_values[-1] > convergence_values[0]:
            convergence_trend = "degrading"
        else:
            convergence_trend = "stable"

    health_score = self._calculate_real_time_health_score(trace, recent_iterations)

    return {
        "status": "active",
        "current_iteration": len(trace.iterations),
        "avg_recent_iteration_time": avg_recent_time,
        "memory_trend": memory_trend,
        "convergence_trend": convergence_trend,
        "health_score": health_score,
        "alerts": self._generate_real_time_alerts(trace, recent_iterations),
    }
|
480
|
+
|
481
|
+
def export_analysis_data(
    self,
    filepath: Optional[str] = None,
    format: str = "json",
    include_traces: bool = True,
):
    """
    Export comprehensive analysis data.

    Exports all analysis data including traces, performance metrics,
    and reports for external analysis, archival, or sharing. JSON
    exports contain the full nested structure; CSV exports contain a
    one-row-per-cycle summary only.

    Args:
        filepath (Optional[str]): Output file path; auto-generated from the
            session id and a timestamp (and placed in the output directory,
            if configured) when None.
        format (str): Export format ("json" or "csv").
        include_traces (bool): Whether to include per-cycle detail reports
            (JSON export only).

    Raises:
        ValueError: If *format* is not "json" or "csv".

    Side Effects:
        Creates the export file with analysis data.

    Example:
        >>> analyzer.export_analysis_data("cycle_analysis.json", include_traces=True)
    """
    # Derive a default file name when none is given; an explicitly
    # provided path is used verbatim (not placed under output_directory).
    if filepath is None:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filepath = f"cycle_analysis_{self.current_session or 'session'}_{timestamp}.{format}"
        if self.output_directory:
            filepath = str(self.output_directory / filepath)

    export_data = {
        "analysis_metadata": {
            "session_id": self.current_session,
            "analysis_level": self.analysis_level,
            "export_timestamp": datetime.now().isoformat(),
            "cycles_count": len(self.session_traces),
        }
    }

    # Include session report
    if self.session_traces:
        export_data["session_report"] = self.generate_session_report()

    # Include individual cycle reports
    if include_traces:
        export_data["cycle_reports"] = [
            self.generate_cycle_report(trace) for trace in self.session_traces
        ]

    # Include performance history if available
    if self.profiler:
        export_data["performance_history"] = [
            metrics.to_dict() for metrics in self.profiler.performance_history
        ]

    # Export to file
    if format == "json":
        with open(filepath, "w") as f:
            json.dump(export_data, f, indent=2)
    elif format == "csv":
        # For CSV, export summary data only
        import csv

        with open(filepath, "w", newline="") as f:
            writer = csv.writer(f)

            # Write header
            writer.writerow(
                [
                    "cycle_id",
                    "workflow_id",
                    "iterations",
                    "execution_time",
                    "converged",
                    "efficiency_score",
                ]
            )

            # Write cycle data
            for trace in self.session_traces:
                stats = trace.get_statistics()
                writer.writerow(
                    [
                        trace.cycle_id,
                        trace.workflow_id,
                        len(trace.iterations),
                        trace.total_execution_time,
                        trace.converged,
                        stats.get("efficiency_score", 0.0),
                    ]
                )
    else:
        raise ValueError(f"Unsupported export format: {format}")

    logger.info(f"Exported analysis data to {filepath} in {format} format")
|
575
|
+
|
576
|
+
def _generate_immediate_insights(self, trace: CycleExecutionTrace):
    """Log headline findings for a just-completed cycle (comprehensive mode only).

    Pure logging: emits info/warning messages about efficiency,
    convergence failure and slow iterations; returns nothing.
    """
    stats = trace.get_statistics()

    # Efficiency thresholds: celebrate above 0.8, warn below 0.3.
    if stats["efficiency_score"] > 0.8:
        logger.info(
            f"Excellent performance for cycle '{trace.cycle_id}' - efficiency: {stats['efficiency_score']:.2f}"
        )
    elif stats["efficiency_score"] < 0.3:
        logger.warning(
            f"Poor performance for cycle '{trace.cycle_id}' - efficiency: {stats['efficiency_score']:.2f}"
        )

    # Non-convergence is always worth a warning.
    if not trace.converged:
        logger.warning(
            f"Cycle '{trace.cycle_id}' failed to converge - reason: {trace.termination_reason}"
        )

    # Iterations averaging more than one second are flagged as slow.
    if stats["avg_iteration_time"] > 1.0:
        logger.warning(
            f"Slow iterations detected for cycle '{trace.cycle_id}' - avg: {stats['avg_iteration_time']:.3f}s"
        )
|
600
|
+
|
601
|
+
def _generate_advanced_analysis(self, trace: CycleExecutionTrace) -> Dict[str, Any]:
    """Produce the deep-dive section of a comprehensive cycle report.

    Returns a dict with convergence statistics (velocity, pattern,
    stability), iteration-time distribution statistics, and resource
    efficiency analysis. Sections are empty dicts when the underlying
    data is unavailable or too short.
    """
    trend = trace.get_convergence_trend()

    # Convergence pattern: needs at least three real values to be useful.
    convergence_analysis: Dict[str, Any] = {}
    if trend:
        samples = [v for _, v in trend if v is not None]
        if len(samples) >= 3:
            # Average drop per iteration (positive = moving toward zero).
            velocity = (samples[0] - samples[-1]) / len(samples)
            if velocity > 0.1:
                pattern = "fast"
            elif velocity > 0.01:
                pattern = "slow"
            else:
                pattern = "minimal"
            convergence_analysis = {
                "convergence_velocity": velocity,
                "convergence_pattern": pattern,
                "stability_score": self._calculate_convergence_stability(samples),
            }

    # Iteration timing distribution across the whole cycle.
    timings = [it.execution_time for it in trace.iterations if it.execution_time]
    iteration_analysis: Dict[str, Any] = {}
    if timings:
        import statistics

        has_duplicates = len(set(timings)) != len(timings)
        iteration_analysis = {
            "time_distribution": {
                "mean": statistics.mean(timings),
                "median": statistics.median(timings),
                # Mode is only meaningful when at least one value repeats.
                "mode": statistics.mode(timings) if has_duplicates else None,
                "skewness": self._calculate_skewness(timings),
            },
            "performance_trend": self._analyze_performance_trend(timings),
        }

    return {
        "convergence_analysis": convergence_analysis,
        "iteration_analysis": iteration_analysis,
        "resource_efficiency": self._analyze_resource_efficiency(trace),
    }
|
651
|
+
|
652
|
+
def _generate_session_insights(
|
653
|
+
self, traces: List[CycleExecutionTrace]
|
654
|
+
) -> Dict[str, Any]:
|
655
|
+
"""Generate insights across multiple cycles in a session."""
|
656
|
+
if not traces:
|
657
|
+
return {}
|
658
|
+
|
659
|
+
# Find best and worst performing cycles
|
660
|
+
cycle_scores = {
|
661
|
+
trace.cycle_id: trace.get_statistics()["efficiency_score"]
|
662
|
+
for trace in traces
|
663
|
+
}
|
664
|
+
best_cycle = max(cycle_scores.items(), key=lambda x: x[1])
|
665
|
+
worst_cycle = min(cycle_scores.items(), key=lambda x: x[1])
|
666
|
+
|
667
|
+
# Identify patterns
|
668
|
+
convergence_rate = len([t for t in traces if t.converged]) / len(traces)
|
669
|
+
avg_iterations = sum(len(t.iterations) for t in traces) / len(traces)
|
670
|
+
|
671
|
+
insights = {
|
672
|
+
"best_performing_cycle": {"id": best_cycle[0], "score": best_cycle[1]},
|
673
|
+
"worst_performing_cycle": {"id": worst_cycle[0], "score": worst_cycle[1]},
|
674
|
+
"overall_convergence_rate": convergence_rate,
|
675
|
+
"avg_iterations_per_cycle": avg_iterations,
|
676
|
+
"performance_consistency": best_cycle[1]
|
677
|
+
- worst_cycle[1], # Lower is more consistent
|
678
|
+
"session_quality": (
|
679
|
+
"excellent"
|
680
|
+
if convergence_rate > 0.9 and cycle_scores[best_cycle[0]] > 0.8
|
681
|
+
else "good" if convergence_rate > 0.7 else "needs_improvement"
|
682
|
+
),
|
683
|
+
}
|
684
|
+
|
685
|
+
return insights
|
686
|
+
|
687
|
+
def _calculate_real_time_health_score(
|
688
|
+
self, trace: CycleExecutionTrace, recent_iterations: List
|
689
|
+
) -> float:
|
690
|
+
"""Calculate real-time health score for an active cycle."""
|
691
|
+
score_components = []
|
692
|
+
|
693
|
+
# Performance component
|
694
|
+
if recent_iterations:
|
695
|
+
avg_time = sum(
|
696
|
+
iter.execution_time or 0 for iter in recent_iterations
|
697
|
+
) / len(recent_iterations)
|
698
|
+
time_score = max(
|
699
|
+
0.0, 1.0 - min(1.0, avg_time / 2.0)
|
700
|
+
) # Penalty after 2s per iteration
|
701
|
+
score_components.append(time_score)
|
702
|
+
|
703
|
+
# Error rate component
|
704
|
+
error_count = len([iter for iter in recent_iterations if iter.error])
|
705
|
+
error_score = (
|
706
|
+
max(0.0, 1.0 - (error_count / len(recent_iterations)))
|
707
|
+
if recent_iterations
|
708
|
+
else 1.0
|
709
|
+
)
|
710
|
+
score_components.append(error_score)
|
711
|
+
|
712
|
+
# Memory trend component (if available)
|
713
|
+
memory_values = [
|
714
|
+
iter.memory_usage_mb for iter in recent_iterations if iter.memory_usage_mb
|
715
|
+
]
|
716
|
+
if memory_values and len(memory_values) >= 2:
|
717
|
+
memory_growth = (memory_values[-1] - memory_values[0]) / memory_values[0]
|
718
|
+
memory_score = max(
|
719
|
+
0.0, 1.0 - max(0.0, memory_growth)
|
720
|
+
) # Penalty for memory growth
|
721
|
+
score_components.append(memory_score)
|
722
|
+
|
723
|
+
return (
|
724
|
+
sum(score_components) / len(score_components) if score_components else 0.5
|
725
|
+
)
|
726
|
+
|
727
|
+
def _generate_real_time_alerts(
|
728
|
+
self, trace: CycleExecutionTrace, recent_iterations: List
|
729
|
+
) -> List[str]:
|
730
|
+
"""Generate real-time alerts for potential issues."""
|
731
|
+
alerts = []
|
732
|
+
|
733
|
+
# Check for slow iterations
|
734
|
+
if recent_iterations:
|
735
|
+
avg_time = sum(
|
736
|
+
iter.execution_time or 0 for iter in recent_iterations
|
737
|
+
) / len(recent_iterations)
|
738
|
+
if avg_time > 2.0:
|
739
|
+
alerts.append(f"Slow iterations detected: {avg_time:.2f}s average")
|
740
|
+
|
741
|
+
# Check for errors
|
742
|
+
error_count = len([iter for iter in recent_iterations if iter.error])
|
743
|
+
if error_count > 0:
|
744
|
+
alerts.append(
|
745
|
+
f"Errors detected in {error_count}/{len(recent_iterations)} recent iterations"
|
746
|
+
)
|
747
|
+
|
748
|
+
# Check for memory growth
|
749
|
+
memory_values = [
|
750
|
+
iter.memory_usage_mb for iter in recent_iterations if iter.memory_usage_mb
|
751
|
+
]
|
752
|
+
if len(memory_values) >= 2:
|
753
|
+
memory_growth = (memory_values[-1] - memory_values[0]) / memory_values[0]
|
754
|
+
if memory_growth > 0.2:
|
755
|
+
alerts.append(
|
756
|
+
f"Memory usage increasing: {memory_growth*100:.1f}% growth"
|
757
|
+
)
|
758
|
+
|
759
|
+
# Check for potential non-convergence
|
760
|
+
if len(trace.iterations) > (trace.max_iterations_configured or 100) * 0.8:
|
761
|
+
alerts.append(
|
762
|
+
f"Approaching max iterations: {len(trace.iterations)}/{trace.max_iterations_configured}"
|
763
|
+
)
|
764
|
+
|
765
|
+
return alerts
|
766
|
+
|
767
|
+
def _calculate_convergence_stability(self, values: List[float]) -> float:
|
768
|
+
"""Calculate stability score for convergence values."""
|
769
|
+
if len(values) < 2:
|
770
|
+
return 1.0
|
771
|
+
|
772
|
+
import statistics
|
773
|
+
|
774
|
+
mean_val = statistics.mean(values)
|
775
|
+
if mean_val == 0:
|
776
|
+
return 1.0
|
777
|
+
|
778
|
+
stddev = statistics.stdev(values)
|
779
|
+
cv = stddev / mean_val # Coefficient of variation
|
780
|
+
|
781
|
+
# Lower CV means more stable
|
782
|
+
return max(0.0, 1.0 - min(1.0, cv))
|
783
|
+
|
784
|
+
def _calculate_skewness(self, data: List[float]) -> float:
|
785
|
+
"""Calculate skewness of data distribution."""
|
786
|
+
if len(data) < 3:
|
787
|
+
return 0.0
|
788
|
+
|
789
|
+
import statistics
|
790
|
+
|
791
|
+
mean_val = statistics.mean(data)
|
792
|
+
n = len(data)
|
793
|
+
variance = sum((x - mean_val) ** 2 for x in data) / n
|
794
|
+
if variance == 0:
|
795
|
+
return 0.0
|
796
|
+
|
797
|
+
std_dev = variance**0.5
|
798
|
+
skewness = sum((x - mean_val) ** 3 for x in data) / (n * std_dev**3)
|
799
|
+
return skewness
|
800
|
+
|
801
|
+
def _analyze_performance_trend(self, iteration_times: List[float]) -> str:
|
802
|
+
"""Analyze performance trend over iterations."""
|
803
|
+
if len(iteration_times) < 3:
|
804
|
+
return "insufficient_data"
|
805
|
+
|
806
|
+
# Simple trend analysis
|
807
|
+
first_half = iteration_times[: len(iteration_times) // 2]
|
808
|
+
second_half = iteration_times[len(iteration_times) // 2 :]
|
809
|
+
|
810
|
+
import statistics
|
811
|
+
|
812
|
+
first_avg = statistics.mean(first_half)
|
813
|
+
second_avg = statistics.mean(second_half)
|
814
|
+
|
815
|
+
improvement = (first_avg - second_avg) / first_avg
|
816
|
+
|
817
|
+
if improvement > 0.1:
|
818
|
+
return "improving"
|
819
|
+
elif improvement < -0.1:
|
820
|
+
return "degrading"
|
821
|
+
else:
|
822
|
+
return "stable"
|
823
|
+
|
824
|
+
def _analyze_resource_efficiency(
|
825
|
+
self, trace: CycleExecutionTrace
|
826
|
+
) -> Dict[str, Any]:
|
827
|
+
"""Analyze resource usage efficiency."""
|
828
|
+
memory_values = [
|
829
|
+
iter.memory_usage_mb for iter in trace.iterations if iter.memory_usage_mb
|
830
|
+
]
|
831
|
+
cpu_values = [
|
832
|
+
iter.cpu_usage_percent
|
833
|
+
for iter in trace.iterations
|
834
|
+
if iter.cpu_usage_percent
|
835
|
+
]
|
836
|
+
|
837
|
+
efficiency = {}
|
838
|
+
|
839
|
+
if memory_values:
|
840
|
+
import statistics
|
841
|
+
|
842
|
+
efficiency["memory_efficiency"] = {
|
843
|
+
"peak_usage": max(memory_values),
|
844
|
+
"avg_usage": statistics.mean(memory_values),
|
845
|
+
"efficiency_score": max(
|
846
|
+
0.0, 1.0 - (max(memory_values) / 2000)
|
847
|
+
), # Penalty after 2GB
|
848
|
+
}
|
849
|
+
|
850
|
+
if cpu_values:
|
851
|
+
import statistics
|
852
|
+
|
853
|
+
efficiency["cpu_efficiency"] = {
|
854
|
+
"peak_usage": max(cpu_values),
|
855
|
+
"avg_usage": statistics.mean(cpu_values),
|
856
|
+
"efficiency_score": min(
|
857
|
+
1.0, statistics.mean(cpu_values) / 100
|
858
|
+
), # Higher CPU usage is better utilization
|
859
|
+
}
|
860
|
+
|
861
|
+
return efficiency
|
862
|
+
|
863
|
+
def _export_cycle_report(self, report: Dict[str, Any], cycle_id: str):
    """Export cycle report to file.

    Writes the report as pretty-printed JSON into the configured output
    directory; silently does nothing when no directory is configured.
    """
    if not self.output_directory:
        return

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filepath = self.output_directory / f"cycle_report_{cycle_id}_{timestamp}.json"

    with open(filepath, "w") as out:
        json.dump(report, out, indent=2)

    logger.debug(f"Exported cycle report to {filepath}")
|
877
|
+
|
878
|
+
def _export_session_report(self, report: Dict[str, Any], session_id: str):
    """Export session report to file.

    Writes the report as pretty-printed JSON into the configured output
    directory; silently does nothing when no directory is configured.
    """
    if not self.output_directory:
        return

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filepath = self.output_directory / f"session_report_{session_id}_{timestamp}.json"

    with open(filepath, "w") as out:
        json.dump(report, out, indent=2)

    logger.debug(f"Exported session report to {filepath}")
|