kailash 0.1.5__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. kailash/__init__.py +1 -1
  2. kailash/access_control.py +740 -0
  3. kailash/api/__main__.py +6 -0
  4. kailash/api/auth.py +668 -0
  5. kailash/api/custom_nodes.py +285 -0
  6. kailash/api/custom_nodes_secure.py +377 -0
  7. kailash/api/database.py +620 -0
  8. kailash/api/studio.py +915 -0
  9. kailash/api/studio_secure.py +893 -0
  10. kailash/mcp/__init__.py +53 -0
  11. kailash/mcp/__main__.py +13 -0
  12. kailash/mcp/ai_registry_server.py +712 -0
  13. kailash/mcp/client.py +447 -0
  14. kailash/mcp/client_new.py +334 -0
  15. kailash/mcp/server.py +293 -0
  16. kailash/mcp/server_new.py +336 -0
  17. kailash/mcp/servers/__init__.py +12 -0
  18. kailash/mcp/servers/ai_registry.py +289 -0
  19. kailash/nodes/__init__.py +4 -2
  20. kailash/nodes/ai/__init__.py +2 -0
  21. kailash/nodes/ai/a2a.py +714 -67
  22. kailash/nodes/ai/intelligent_agent_orchestrator.py +31 -37
  23. kailash/nodes/ai/iterative_llm_agent.py +1280 -0
  24. kailash/nodes/ai/llm_agent.py +324 -1
  25. kailash/nodes/ai/self_organizing.py +5 -6
  26. kailash/nodes/base.py +15 -2
  27. kailash/nodes/base_async.py +45 -0
  28. kailash/nodes/base_cycle_aware.py +374 -0
  29. kailash/nodes/base_with_acl.py +338 -0
  30. kailash/nodes/code/python.py +135 -27
  31. kailash/nodes/data/readers.py +16 -6
  32. kailash/nodes/data/writers.py +16 -6
  33. kailash/nodes/logic/__init__.py +8 -0
  34. kailash/nodes/logic/convergence.py +642 -0
  35. kailash/nodes/logic/loop.py +153 -0
  36. kailash/nodes/logic/operations.py +187 -27
  37. kailash/nodes/mixins/__init__.py +11 -0
  38. kailash/nodes/mixins/mcp.py +228 -0
  39. kailash/nodes/mixins.py +387 -0
  40. kailash/runtime/__init__.py +2 -1
  41. kailash/runtime/access_controlled.py +458 -0
  42. kailash/runtime/local.py +106 -33
  43. kailash/runtime/parallel_cyclic.py +529 -0
  44. kailash/sdk_exceptions.py +90 -5
  45. kailash/security.py +845 -0
  46. kailash/tracking/manager.py +38 -15
  47. kailash/tracking/models.py +1 -1
  48. kailash/tracking/storage/filesystem.py +30 -2
  49. kailash/utils/__init__.py +8 -0
  50. kailash/workflow/__init__.py +18 -0
  51. kailash/workflow/convergence.py +270 -0
  52. kailash/workflow/cycle_analyzer.py +768 -0
  53. kailash/workflow/cycle_builder.py +573 -0
  54. kailash/workflow/cycle_config.py +709 -0
  55. kailash/workflow/cycle_debugger.py +760 -0
  56. kailash/workflow/cycle_exceptions.py +601 -0
  57. kailash/workflow/cycle_profiler.py +671 -0
  58. kailash/workflow/cycle_state.py +338 -0
  59. kailash/workflow/cyclic_runner.py +985 -0
  60. kailash/workflow/graph.py +500 -39
  61. kailash/workflow/migration.py +768 -0
  62. kailash/workflow/safety.py +365 -0
  63. kailash/workflow/templates.py +744 -0
  64. kailash/workflow/validation.py +693 -0
  65. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/METADATA +256 -12
  66. kailash-0.2.0.dist-info/RECORD +125 -0
  67. kailash/nodes/mcp/__init__.py +0 -11
  68. kailash/nodes/mcp/client.py +0 -554
  69. kailash/nodes/mcp/resource.py +0 -682
  70. kailash/nodes/mcp/server.py +0 -577
  71. kailash-0.1.5.dist-info/RECORD +0 -88
  72. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/WHEEL +0 -0
  73. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/entry_points.txt +0 -0
  74. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/licenses/LICENSE +0 -0
  75. {kailash-0.1.5.dist-info → kailash-0.2.0.dist-info}/top_level.txt +0 -0
kailash/workflow/cycle_analyzer.py
@@ -0,0 +1,768 @@
+"""
+Cycle Analysis and Performance Monitoring for Cyclic Workflows.
+
+This module provides comprehensive analysis tools that combine debugging and
+profiling capabilities to deliver deep insights into cycle behavior, performance
+characteristics, and optimization opportunities. It serves as the primary
+analysis interface for understanding and improving cyclic workflow execution.
+
+Examples:
+    Comprehensive cycle analysis:
+
+    >>> analyzer = CycleAnalyzer(
+    ...     analysis_level="comprehensive",
+    ...     output_directory="./analysis_results"
+    ... )
+    >>> # Start analysis session
+    >>> session = analyzer.start_analysis_session("optimization_study")
+    >>> # Analyze cycle execution
+    >>> trace = analyzer.start_cycle_analysis("opt_cycle", "workflow_1")
+    >>> analyzer.track_iteration(trace, input_data, output_data, 0.05)
+    >>> analyzer.complete_cycle_analysis(trace, True, "convergence")
+    >>> # Generate comprehensive report
+    >>> report = analyzer.generate_session_report()
+    >>> analyzer.export_analysis_data("analysis_results.json")
+
+    Real-time monitoring:
+
+    >>> # Monitor active cycle
+    >>> metrics = analyzer.get_real_time_metrics(trace)
+    >>> if metrics['health_score'] < 0.5:
+    ...     print("Performance issue detected!")
+    ...     print(f"Alerts: {metrics['alerts']}")
+"""
+
+import json
+import logging
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from kailash.workflow.cycle_debugger import CycleDebugger, CycleExecutionTrace
+from kailash.workflow.cycle_profiler import CycleProfiler
+
+logger = logging.getLogger(__name__)
+
+
+class CycleAnalyzer:
+    """
+    Comprehensive analysis tool combining debugging and profiling capabilities.
+
+    This class provides a unified interface for cycle analysis, combining
+    the detailed tracking capabilities of CycleDebugger with the performance
+    insights of CycleProfiler to deliver cycle optimization guidance and
+    health monitoring.
+
+    Examples:
+        >>> analyzer = CycleAnalyzer(analysis_level="comprehensive")
+        >>> # Start analysis
+        >>> session = analyzer.start_analysis_session("optimization_study")
+        >>> trace = analyzer.start_cycle_analysis("cycle_1", "workflow_1")
+        >>> # During execution...
+        >>> analyzer.track_iteration(trace, input_data, output_data)
+        >>> # Complete analysis
+        >>> analyzer.complete_cycle_analysis(trace, True, "convergence")
+        >>> report = analyzer.generate_session_report()
+    """
+
+    def __init__(
+        self,
+        analysis_level: str = "standard",
+        enable_profiling: bool = True,
+        enable_debugging: bool = True,
+        output_directory: Optional[str] = None
+    ):
+        """
+        Initialize cycle analyzer.
+
+        Args:
+            analysis_level: Level of analysis ("basic", "standard", "comprehensive").
+            enable_profiling: Whether to enable performance profiling.
+            enable_debugging: Whether to enable detailed debugging.
+            output_directory: Directory for analysis output files.
+        """
+        self.analysis_level = analysis_level
+        self.enable_profiling = enable_profiling
+        self.enable_debugging = enable_debugging
+        self.output_directory = Path(output_directory) if output_directory else None
+
+        # Initialize components based on configuration
+        debug_level = {
+            "basic": "basic",
+            "standard": "detailed",
+            "comprehensive": "verbose"
+        }.get(analysis_level, "detailed")
+
+        self.debugger = CycleDebugger(
+            debug_level=debug_level,
+            enable_profiling=enable_profiling
+        ) if enable_debugging else None
+
+        self.profiler = CycleProfiler(
+            enable_advanced_metrics=(analysis_level == "comprehensive")
+        ) if enable_profiling else None
+
+        # Analysis session tracking
+        self.current_session: Optional[str] = None
+        self.session_traces: List[CycleExecutionTrace] = []
+        self.analysis_history: List[Dict[str, Any]] = []
+
+        # Create output directory if specified
+        if self.output_directory:
+            self.output_directory.mkdir(parents=True, exist_ok=True)
+            logger.info(f"Analysis output directory: {self.output_directory}")
+
+    def start_analysis_session(self, session_id: str) -> str:
+        """
+        Start a new analysis session for grouping related cycles.
+
+        Analysis sessions allow grouping multiple cycle executions for
+        comparative analysis, trend identification, and comprehensive
+        reporting across related workflow executions.
+
+        Args:
+            session_id: Unique identifier for the analysis session.
+
+        Returns:
+            Session ID for reference.
+
+        Examples:
+            >>> session = analyzer.start_analysis_session("optimization_experiment_1")
+        """
+        self.current_session = session_id
+        self.session_traces = []
+
+        logger.info(f"Started analysis session: {session_id}")
+        return session_id
+
+    def start_cycle_analysis(
+        self,
+        cycle_id: str,
+        workflow_id: str,
+        max_iterations: Optional[int] = None,
+        timeout: Optional[float] = None,
+        convergence_condition: Optional[str] = None
+    ) -> Optional[CycleExecutionTrace]:
+        """
+        Start analysis for a new cycle execution.
+
+        Begins comprehensive tracking for a cycle execution, including
+        debugging and profiling as configured. Returns a trace object
+        for tracking iteration progress.
+
+        Args:
+            cycle_id: Unique identifier for the cycle.
+            workflow_id: Parent workflow identifier.
+            max_iterations: Configured iteration limit.
+            timeout: Configured timeout limit.
+            convergence_condition: Condition used to detect convergence.
+
+        Returns:
+            Trace object for tracking, or None if debugging disabled.
+
+        Examples:
+            >>> trace = analyzer.start_cycle_analysis("opt_cycle", "workflow_1", max_iterations=100)
+        """
+        if not self.debugger:
+            logger.warning("Debugging not enabled - cannot create trace")
+            return None
+
+        trace = self.debugger.start_cycle(
+            cycle_id=cycle_id,
+            workflow_id=workflow_id,
+            max_iterations=max_iterations,
+            timeout=timeout,
+            convergence_condition=convergence_condition
+        )
+
+        logger.info(f"Started cycle analysis for '{cycle_id}' in session '{self.current_session}'")
+        return trace
+
+    def track_iteration(
+        self,
+        trace: CycleExecutionTrace,
+        input_data: Dict[str, Any],
+        output_data: Dict[str, Any],
+        convergence_value: Optional[float] = None,
+        node_executions: Optional[List[str]] = None
+    ):
+        """
+        Track a single cycle iteration with input/output data.
+
+        Records detailed information about a cycle iteration including
+        timing, resource usage, convergence metrics, and execution flow
+        for comprehensive analysis.
+
+        Args:
+            trace: Active trace object.
+            input_data: Input data for the iteration.
+            output_data: Output data from the iteration.
+            convergence_value: Convergence metric if available.
+            node_executions: List of executed nodes.
+
+        Examples:
+            >>> analyzer.track_iteration(trace, input_data, output_data, convergence_value=0.05)
+        """
+        if not self.debugger:
+            return
+
+        iteration = self.debugger.start_iteration(trace, input_data)
+        self.debugger.end_iteration(
+            trace, iteration, output_data, convergence_value, node_executions
+        )
+
+        if self.analysis_level == "comprehensive":
+            logger.debug(
+                f"Tracked iteration {iteration.iteration_number} for cycle '{trace.cycle_id}' "
+                f"with convergence={convergence_value}"
+            )
+
+    def complete_cycle_analysis(
+        self,
+        trace: CycleExecutionTrace,
+        converged: bool,
+        termination_reason: str,
+        convergence_iteration: Optional[int] = None
+    ):
+        """
+        Complete cycle analysis and generate insights.
+
+        Finalizes cycle tracking and performs comprehensive analysis
+        including performance metrics, optimization recommendations,
+        and comparative insights if multiple cycles are available.
+
+        Args:
+            trace: Cycle trace to complete.
+            converged: Whether the cycle converged successfully.
+            termination_reason: Why the cycle terminated.
+            convergence_iteration: Iteration where convergence occurred.
+
+        Examples:
+            >>> analyzer.complete_cycle_analysis(trace, converged=True, termination_reason="convergence")
+        """
+        if not self.debugger:
+            return
+
+        # Complete debugging
+        self.debugger.end_cycle(trace, converged, termination_reason, convergence_iteration)
+
+        # Add to profiler for performance analysis
+        if self.profiler:
+            self.profiler.add_trace(trace)
+
+        # Add to session traces
+        self.session_traces.append(trace)
+
+        logger.info(
+            f"Completed cycle analysis for '{trace.cycle_id}' - "
+            f"converged={converged}, iterations={len(trace.iterations)}"
+        )
+
+        # Generate immediate insights for comprehensive analysis
+        if self.analysis_level == "comprehensive":
+            self._generate_immediate_insights(trace)
+
+    def generate_cycle_report(self, trace: CycleExecutionTrace) -> Dict[str, Any]:
+        """
+        Generate comprehensive report for a single cycle.
+
+        Creates a detailed analysis report for a specific cycle execution
+        including debugging information, performance metrics, and
+        optimization recommendations.
+
+        Args:
+            trace: Completed cycle trace.
+
+        Returns:
+            Comprehensive cycle analysis report.
+
+        Examples:
+            >>> report = analyzer.generate_cycle_report(trace)
+            >>> print(f"Cycle efficiency: {report['performance']['efficiency_score']}")
+        """
+        report = {
+            "analysis_info": {
+                "cycle_id": trace.cycle_id,
+                "workflow_id": trace.workflow_id,
+                "analysis_level": self.analysis_level,
+                "session_id": self.current_session,
+                "generated_at": datetime.now().isoformat()
+            }
+        }
+
+        # Add debugging information
+        if self.debugger:
+            debug_report = self.debugger.generate_report(trace)
+            report["debugging"] = debug_report
+
+        # Add profiling information
+        if self.profiler:
+            # Create temporary profiler for single trace analysis
+            single_profiler = CycleProfiler(enable_advanced_metrics=(self.analysis_level == "comprehensive"))
+            single_profiler.add_trace(trace)
+
+            performance_metrics = single_profiler.analyze_performance()
+            recommendations = single_profiler.get_optimization_recommendations(trace)
+
+            report["performance"] = performance_metrics.to_dict()
+            report["recommendations"] = recommendations
+
+        # Add analysis-level specific insights
+        if self.analysis_level == "comprehensive":
+            report["advanced_analysis"] = self._generate_advanced_analysis(trace)
+
+        # Export to file if configured
+        if self.output_directory:
+            self._export_cycle_report(report, trace.cycle_id)
+
+        return report
+
+    def generate_session_report(self, session_id: Optional[str] = None) -> Dict[str, Any]:
+        """
+        Generate comprehensive report for an analysis session.
+
+        Creates a detailed analysis report covering all cycles in a session,
+        including comparative analysis, trend identification, and overall
+        optimization recommendations.
+
+        Args:
+            session_id: Session to analyze, or current session if None.
+
+        Returns:
+            Comprehensive session analysis report.
+
+        Examples:
+            >>> report = analyzer.generate_session_report()
+            >>> print(f"Best cycle: {report['comparative_analysis']['best_cycle']}")
+        """
+        target_session = session_id or self.current_session
+        traces_to_analyze = self.session_traces if session_id in (None, self.current_session) else []
+
+        report = {
+            "session_info": {
+                "session_id": target_session,
+                "analysis_level": self.analysis_level,
+                "cycles_analyzed": len(traces_to_analyze),
+                "generated_at": datetime.now().isoformat()
+            },
+            "summary": {
+                "total_cycles": len(traces_to_analyze),
+                "total_iterations": sum(len(trace.iterations) for trace in traces_to_analyze),
+                "convergence_rate": len([t for t in traces_to_analyze if t.converged]) / len(traces_to_analyze) if traces_to_analyze else 0,
+                "avg_cycle_time": sum(t.total_execution_time or 0 for t in traces_to_analyze) / len(traces_to_analyze) if traces_to_analyze else 0
+            }
+        }
+
+        if not traces_to_analyze:
+            report["warning"] = "No traces available for analysis"
+            return report
+
+        # Add profiling analysis
+        if self.profiler and traces_to_analyze:
+            # Ensure all traces are in profiler
+            for trace in traces_to_analyze:
+                if trace not in self.profiler.traces:
+                    self.profiler.add_trace(trace)
+
+            performance_report = self.profiler.generate_performance_report()
+            report["performance_analysis"] = performance_report
+
+        # Add comparative analysis
+        if len(traces_to_analyze) >= 2:
+            cycle_ids = [trace.cycle_id for trace in traces_to_analyze]
+            comparison = self.profiler.compare_cycles(cycle_ids) if self.profiler else {}
+            report["comparative_analysis"] = comparison
+
+        # Add session-specific insights
+        report["insights"] = self._generate_session_insights(traces_to_analyze)
+
+        # Export to file if configured
+        if self.output_directory:
+            self._export_session_report(report, target_session)
+
+        return report
+
+    def get_real_time_metrics(self, trace: CycleExecutionTrace) -> Dict[str, Any]:
+        """
+        Get real-time metrics for an active cycle.
+
+        Provides current performance metrics and health indicators
+        for a cycle that is currently executing, enabling real-time
+        monitoring and early intervention if issues are detected.
+
+        Args:
+            trace: Active cycle trace.
+
+        Returns:
+            Real-time metrics and health indicators.
+
+        Side Effects:
+            None - this is a pure analysis method.
+
+        Examples:
+            >>> metrics = analyzer.get_real_time_metrics(trace)
+            >>> if metrics['health_score'] < 0.5:
+            ...     print("Cycle performance issue detected!")
+        """
+        if not trace.iterations:
+            return {"status": "no_iterations", "health_score": 0.5}
+
+        recent_iterations = trace.iterations[-5:]  # Last 5 iterations
+
+        # Calculate real-time performance indicators
+        avg_recent_time = sum(it.execution_time or 0 for it in recent_iterations) / len(recent_iterations)
+
+        # Memory trend (if available)
+        memory_values = [it.memory_usage_mb for it in recent_iterations if it.memory_usage_mb]
+        memory_trend = "stable"
+        if len(memory_values) >= 2:
+            if memory_values[-1] > memory_values[0] * 1.2:
+                memory_trend = "increasing"
+            elif memory_values[-1] < memory_values[0] * 0.8:
+                memory_trend = "decreasing"
+
+        # Convergence trend (a value of 0.0 is meaningful, so filter on None)
+        convergence_values = [it.convergence_value for it in recent_iterations if it.convergence_value is not None]
+        convergence_trend = "unknown"
+        if len(convergence_values) >= 2:
+            if convergence_values[-1] < convergence_values[0]:
+                convergence_trend = "improving"
+            elif convergence_values[-1] > convergence_values[0]:
+                convergence_trend = "degrading"
+            else:
+                convergence_trend = "stable"
+
+        # Health score calculation
+        health_score = self._calculate_real_time_health_score(trace, recent_iterations)
+
+        return {
+            "status": "active",
+            "current_iteration": len(trace.iterations),
+            "avg_recent_iteration_time": avg_recent_time,
+            "memory_trend": memory_trend,
+            "convergence_trend": convergence_trend,
+            "health_score": health_score,
+            "alerts": self._generate_real_time_alerts(trace, recent_iterations)
+        }
+
+    def export_analysis_data(
+        self,
+        filepath: Optional[str] = None,
+        format: str = "json",
+        include_traces: bool = True
+    ):
+        """
+        Export comprehensive analysis data.
+
+        Exports all analysis data including traces, performance metrics,
+        and reports for external analysis, archival, or sharing.
+
+        Args:
+            filepath: Output file path, auto-generated if None.
+            format: Export format ("json", "csv").
+            include_traces: Whether to include detailed trace data.
+
+        Side Effects:
+            Creates an export file containing the analysis data.
+
+        Examples:
+            >>> analyzer.export_analysis_data("cycle_analysis.json", include_traces=True)
+        """
+        if filepath is None:
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            filepath = f"cycle_analysis_{self.current_session or 'session'}_{timestamp}.{format}"
+            if self.output_directory:
+                filepath = str(self.output_directory / filepath)
+
+        export_data = {
+            "analysis_metadata": {
+                "session_id": self.current_session,
+                "analysis_level": self.analysis_level,
+                "export_timestamp": datetime.now().isoformat(),
+                "cycles_count": len(self.session_traces)
+            }
+        }
+
+        # Include session report
+        if self.session_traces:
+            export_data["session_report"] = self.generate_session_report()
+
+        # Include individual cycle reports
+        if include_traces:
+            export_data["cycle_reports"] = [
+                self.generate_cycle_report(trace) for trace in self.session_traces
+            ]
+
+        # Include performance history if available
+        if self.profiler:
+            export_data["performance_history"] = [
+                metrics.to_dict() for metrics in self.profiler.performance_history
+            ]
+
+        # Export to file
+        if format == "json":
+            with open(filepath, 'w') as f:
+                json.dump(export_data, f, indent=2)
+        elif format == "csv":
+            # For CSV, export summary data only
+            import csv
+            with open(filepath, 'w', newline='') as f:
+                writer = csv.writer(f)
+
+                # Write header
+                writer.writerow(["cycle_id", "workflow_id", "iterations", "execution_time", "converged", "efficiency_score"])
+
+                # Write cycle data
+                for trace in self.session_traces:
+                    stats = trace.get_statistics()
+                    writer.writerow([
+                        trace.cycle_id,
+                        trace.workflow_id,
+                        len(trace.iterations),
+                        trace.total_execution_time,
+                        trace.converged,
+                        stats.get("efficiency_score", 0.0)
+                    ])
+        else:
+            raise ValueError(f"Unsupported export format: {format}")
+
+        logger.info(f"Exported analysis data to {filepath} in {format} format")
+
+    def _generate_immediate_insights(self, trace: CycleExecutionTrace):
+        """Generate immediate insights for a completed cycle."""
+        stats = trace.get_statistics()
+
+        # Log key insights
+        if stats["efficiency_score"] > 0.8:
+            logger.info(f"Excellent performance for cycle '{trace.cycle_id}' - efficiency: {stats['efficiency_score']:.2f}")
+        elif stats["efficiency_score"] < 0.3:
+            logger.warning(f"Poor performance for cycle '{trace.cycle_id}' - efficiency: {stats['efficiency_score']:.2f}")
+
+        if not trace.converged:
+            logger.warning(f"Cycle '{trace.cycle_id}' failed to converge - reason: {trace.termination_reason}")
+
+        # Check for performance issues
+        if stats["avg_iteration_time"] > 1.0:
+            logger.warning(f"Slow iterations detected for cycle '{trace.cycle_id}' - avg: {stats['avg_iteration_time']:.3f}s")
+
+    def _generate_advanced_analysis(self, trace: CycleExecutionTrace) -> Dict[str, Any]:
+        """Generate advanced analysis insights for comprehensive mode."""
+        convergence_trend = trace.get_convergence_trend()
+
+        # Convergence pattern analysis
+        convergence_analysis = {}
+        if convergence_trend:
+            values = [value for _, value in convergence_trend if value is not None]
+            if len(values) >= 3:
+                # Calculate convergence velocity (guarded above: len(values) >= 3)
+                velocity = (values[0] - values[-1]) / len(values)
+                convergence_analysis = {
+                    "convergence_velocity": velocity,
+                    "convergence_pattern": "fast" if velocity > 0.1 else "slow" if velocity > 0.01 else "minimal",
+                    "stability_score": self._calculate_convergence_stability(values)
+                }
+
+        # Iteration pattern analysis
+        iteration_times = [it.execution_time for it in trace.iterations if it.execution_time]
+        iteration_analysis = {}
+        if iteration_times:
+            import statistics
+            iteration_analysis = {
+                "time_distribution": {
+                    "mean": statistics.mean(iteration_times),
+                    "median": statistics.median(iteration_times),
+                    "mode": statistics.mode(iteration_times) if len(set(iteration_times)) != len(iteration_times) else None,
+                    "skewness": self._calculate_skewness(iteration_times)
+                },
+                "performance_trend": self._analyze_performance_trend(iteration_times)
+            }
+
+        return {
+            "convergence_analysis": convergence_analysis,
+            "iteration_analysis": iteration_analysis,
+            "resource_efficiency": self._analyze_resource_efficiency(trace)
+        }
+
+    def _generate_session_insights(self, traces: List[CycleExecutionTrace]) -> Dict[str, Any]:
+        """Generate insights across multiple cycles in a session."""
+        if not traces:
+            return {}
+
+        # Find best and worst performing cycles
+        cycle_scores = {trace.cycle_id: trace.get_statistics()["efficiency_score"] for trace in traces}
+        best_cycle = max(cycle_scores.items(), key=lambda x: x[1])
+        worst_cycle = min(cycle_scores.items(), key=lambda x: x[1])
+
+        # Identify patterns
+        convergence_rate = len([t for t in traces if t.converged]) / len(traces)
+        avg_iterations = sum(len(t.iterations) for t in traces) / len(traces)
+
+        insights = {
+            "best_performing_cycle": {"id": best_cycle[0], "score": best_cycle[1]},
+            "worst_performing_cycle": {"id": worst_cycle[0], "score": worst_cycle[1]},
+            "overall_convergence_rate": convergence_rate,
+            "avg_iterations_per_cycle": avg_iterations,
+            "performance_consistency": best_cycle[1] - worst_cycle[1],  # Lower is more consistent
+            "session_quality": "excellent" if convergence_rate > 0.9 and cycle_scores[best_cycle[0]] > 0.8 else
+                               "good" if convergence_rate > 0.7 else
+                               "needs_improvement"
+        }
+
+        return insights
+
+    def _calculate_real_time_health_score(self, trace: CycleExecutionTrace, recent_iterations: List) -> float:
+        """Calculate real-time health score for an active cycle."""
+        score_components = []
+
+        # Performance component
+        if recent_iterations:
+            avg_time = sum(it.execution_time or 0 for it in recent_iterations) / len(recent_iterations)
+            time_score = max(0.0, 1.0 - min(1.0, avg_time / 2.0))  # Penalty after 2s per iteration
+            score_components.append(time_score)
+
+        # Error rate component
+        error_count = len([it for it in recent_iterations if it.error])
+        error_score = max(0.0, 1.0 - (error_count / len(recent_iterations))) if recent_iterations else 1.0
+        score_components.append(error_score)
+
+        # Memory trend component (if available)
+        memory_values = [it.memory_usage_mb for it in recent_iterations if it.memory_usage_mb]
+        if memory_values and len(memory_values) >= 2:
+            memory_growth = (memory_values[-1] - memory_values[0]) / memory_values[0]
+            memory_score = max(0.0, 1.0 - max(0.0, memory_growth))  # Penalty for memory growth
+            score_components.append(memory_score)
+
+        return sum(score_components) / len(score_components) if score_components else 0.5
+
+    def _generate_real_time_alerts(self, trace: CycleExecutionTrace, recent_iterations: List) -> List[str]:
+        """Generate real-time alerts for potential issues."""
+        alerts = []
+
+        # Check for slow iterations
+        if recent_iterations:
+            avg_time = sum(it.execution_time or 0 for it in recent_iterations) / len(recent_iterations)
+            if avg_time > 2.0:
+                alerts.append(f"Slow iterations detected: {avg_time:.2f}s average")
+
+        # Check for errors
+        error_count = len([it for it in recent_iterations if it.error])
+        if error_count > 0:
+            alerts.append(f"Errors detected in {error_count}/{len(recent_iterations)} recent iterations")
+
+        # Check for memory growth
+        memory_values = [it.memory_usage_mb for it in recent_iterations if it.memory_usage_mb]
+        if len(memory_values) >= 2:
+            memory_growth = (memory_values[-1] - memory_values[0]) / memory_values[0]
+            if memory_growth > 0.2:
+                alerts.append(f"Memory usage increasing: {memory_growth*100:.1f}% growth")
+
+        # Check for potential non-convergence
+        if len(trace.iterations) > (trace.max_iterations_configured or 100) * 0.8:
+            alerts.append(f"Approaching max iterations: {len(trace.iterations)}/{trace.max_iterations_configured}")
+
+        return alerts
+
+    def _calculate_convergence_stability(self, values: List[float]) -> float:
+        """Calculate stability score for convergence values."""
+        if len(values) < 2:
+            return 1.0
+
+        import statistics
+        mean_val = statistics.mean(values)
+        if mean_val == 0:
+            return 1.0
+
+        stddev = statistics.stdev(values)
+        cv = stddev / mean_val  # Coefficient of variation
+
+        # Lower CV means more stable
+        return max(0.0, 1.0 - min(1.0, cv))
+
+    def _calculate_skewness(self, data: List[float]) -> float:
+        """Calculate skewness of data distribution."""
+        if len(data) < 3:
+            return 0.0
+
+        import statistics
+        mean_val = statistics.mean(data)
+        n = len(data)
+        variance = sum((x - mean_val) ** 2 for x in data) / n
+        if variance == 0:
+            return 0.0
+
+        std_dev = variance ** 0.5
+        skewness = sum((x - mean_val) ** 3 for x in data) / (n * std_dev ** 3)
+        return skewness
+
+    def _analyze_performance_trend(self, iteration_times: List[float]) -> str:
+        """Analyze performance trend over iterations."""
+        if len(iteration_times) < 3:
+            return "insufficient_data"
+
+        # Simple trend analysis
+        first_half = iteration_times[:len(iteration_times)//2]
+        second_half = iteration_times[len(iteration_times)//2:]
+
+        import statistics
+        first_avg = statistics.mean(first_half)
+        second_avg = statistics.mean(second_half)
+
+        improvement = (first_avg - second_avg) / first_avg
+
+        if improvement > 0.1:
+            return "improving"
+        elif improvement < -0.1:
+            return "degrading"
+        else:
+            return "stable"
+
+    def _analyze_resource_efficiency(self, trace: CycleExecutionTrace) -> Dict[str, Any]:
+        """Analyze resource usage efficiency."""
+        memory_values = [it.memory_usage_mb for it in trace.iterations if it.memory_usage_mb]
+        cpu_values = [it.cpu_usage_percent for it in trace.iterations if it.cpu_usage_percent]
+
+        efficiency = {}
+
+        if memory_values:
+            import statistics
+            efficiency["memory_efficiency"] = {
+                "peak_usage": max(memory_values),
+                "avg_usage": statistics.mean(memory_values),
+                "efficiency_score": max(0.0, 1.0 - (max(memory_values) / 2000))  # Penalty after 2GB
+            }
+
+        if cpu_values:
+            import statistics
+            efficiency["cpu_efficiency"] = {
+                "peak_usage": max(cpu_values),
+                "avg_usage": statistics.mean(cpu_values),
+                "efficiency_score": min(1.0, statistics.mean(cpu_values) / 100)  # Higher CPU usage means better utilization
+            }
+
+        return efficiency
+
+    def _export_cycle_report(self, report: Dict[str, Any], cycle_id: str):
+        """Export cycle report to file."""
+        if not self.output_directory:
+            return
+
+        filename = f"cycle_report_{cycle_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
+        filepath = self.output_directory / filename
+
+        with open(filepath, 'w') as f:
+            json.dump(report, f, indent=2)
+
+        logger.debug(f"Exported cycle report to {filepath}")
+
+    def _export_session_report(self, report: Dict[str, Any], session_id: str):
+        """Export session report to file."""
+        if not self.output_directory:
+            return
+
+        filename = f"session_report_{session_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
+        filepath = self.output_directory / filename
+
+        with open(filepath, 'w') as f:
+            json.dump(report, f, indent=2)
+
+        logger.debug(f"Exported session report to {filepath}")