kailash 0.9.15__py3-none-any.whl → 0.9.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/middleware/database/base_models.py +7 -1
- kailash/migration/__init__.py +30 -0
- kailash/migration/cli.py +340 -0
- kailash/migration/compatibility_checker.py +662 -0
- kailash/migration/configuration_validator.py +837 -0
- kailash/migration/documentation_generator.py +1828 -0
- kailash/migration/examples/__init__.py +5 -0
- kailash/migration/examples/complete_migration_example.py +692 -0
- kailash/migration/migration_assistant.py +715 -0
- kailash/migration/performance_comparator.py +760 -0
- kailash/migration/regression_detector.py +1141 -0
- kailash/migration/tests/__init__.py +6 -0
- kailash/migration/tests/test_compatibility_checker.py +403 -0
- kailash/migration/tests/test_integration.py +463 -0
- kailash/migration/tests/test_migration_assistant.py +397 -0
- kailash/migration/tests/test_performance_comparator.py +433 -0
- kailash/nodes/data/async_sql.py +1507 -6
- kailash/runtime/local.py +1255 -8
- kailash/runtime/monitoring/__init__.py +1 -0
- kailash/runtime/monitoring/runtime_monitor.py +780 -0
- kailash/runtime/resource_manager.py +3033 -0
- kailash/sdk_exceptions.py +21 -0
- kailash/workflow/cyclic_runner.py +18 -2
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/METADATA +1 -1
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/RECORD +30 -12
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/WHEEL +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/entry_points.txt +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/licenses/NOTICE +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/top_level.txt +0 -0
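
The bulk of the new code is the `kailash.migration` package; its largest file, `kailash/migration/documentation_generator.py` (+1828 lines), is the diff shown below. As a rough usage sketch only (the class and method names are taken from that file; everything else, including how the optional analysis inputs would be produced, is assumed), generating a guide could look like:

```python
# Hypothetical usage sketch based on the API visible in the diff below.
from kailash.migration.documentation_generator import MigrationDocGenerator

generator = MigrationDocGenerator()

# With no analysis/performance inputs, the optional sections are simply skipped.
guide = generator.generate_migration_guide(scenario="standard", audience="developer")

for section in guide.sections:  # sections are kept sorted by their `order` field
    print(section.title, section.importance)
    print(section.content[:200])  # each section body is Markdown text
```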
@@ -0,0 +1,1828 @@
"""Automated documentation generator for LocalRuntime migration guides.

This module generates comprehensive migration documentation based on analysis
results, configuration changes, and best practices. It creates tailored
migration guides for different scenarios and audiences.
"""

import json
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union

from .compatibility_checker import (
    AnalysisResult,
    CompatibilityChecker,
    IssueSeverity,
    IssueType,
)
from .configuration_validator import ConfigurationValidator, ValidationResult
from .migration_assistant import MigrationAssistant, MigrationPlan, MigrationResult
from .performance_comparator import PerformanceComparator, PerformanceReport


@dataclass
class DocumentationSection:
    """A section of migration documentation."""

    title: str
    content: str
    order: int = 0
    audience: str = "all"  # "developer", "admin", "architect", "all"
    importance: str = "medium"  # "critical", "high", "medium", "low"


@dataclass
class MigrationGuide:
    """Complete migration guide with all sections."""

    title: str
    sections: List[DocumentationSection] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)
    generated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def add_section(self, section: DocumentationSection) -> None:
        """Add a section to the guide."""
        self.sections.append(section)
        # Keep sections sorted by order
        self.sections.sort(key=lambda s: s.order)


class MigrationDocGenerator:
    """Automated generator for migration documentation and guides."""

    def __init__(self):
        """Initialize the documentation generator."""
        self.template_sections = {
            "overview": self._generate_overview_section,
            "prerequisites": self._generate_prerequisites_section,
            "compatibility": self._generate_compatibility_section,
            "migration_steps": self._generate_migration_steps_section,
            "configuration": self._generate_configuration_section,
            "performance": self._generate_performance_section,
            "validation": self._generate_validation_section,
            "troubleshooting": self._generate_troubleshooting_section,
            "best_practices": self._generate_best_practices_section,
            "rollback": self._generate_rollback_section,
            "enterprise": self._generate_enterprise_section,
            "appendix": self._generate_appendix_section,
        }

        # Documentation templates for different scenarios
        self.scenario_templates = {
            "simple": ["overview", "migration_steps", "validation", "troubleshooting"],
            "standard": [
                "overview",
                "prerequisites",
                "compatibility",
                "migration_steps",
                "configuration",
                "validation",
                "troubleshooting",
                "best_practices",
            ],
            "enterprise": [
                "overview",
                "prerequisites",
                "compatibility",
                "migration_steps",
                "configuration",
                "performance",
                "validation",
                "enterprise",
                "troubleshooting",
                "best_practices",
                "rollback",
                "appendix",
            ],
            "performance_critical": [
                "overview",
                "prerequisites",
                "performance",
                "migration_steps",
                "configuration",
                "validation",
                "troubleshooting",
                "rollback",
            ],
        }

    def generate_migration_guide(
        self,
        analysis_result: Optional[AnalysisResult] = None,
        migration_plan: Optional[MigrationPlan] = None,
        migration_result: Optional[MigrationResult] = None,
        performance_report: Optional[PerformanceReport] = None,
        validation_result: Optional[ValidationResult] = None,
        scenario: str = "standard",
        audience: str = "developer",
    ) -> MigrationGuide:
        """Generate a comprehensive migration guide.

        Args:
            analysis_result: Compatibility analysis results
            migration_plan: Migration execution plan
            migration_result: Migration execution results
            performance_report: Performance comparison results
            validation_result: Configuration validation results
            scenario: Documentation scenario template
            audience: Target audience

        Returns:
            Complete migration guide
        """
        guide = MigrationGuide(title="LocalRuntime Migration Guide")
        guide.metadata = {
            "scenario": scenario,
            "audience": audience,
            "has_analysis": analysis_result is not None,
            "has_migration_plan": migration_plan is not None,
            "has_migration_result": migration_result is not None,
            "has_performance_report": performance_report is not None,
            "has_validation_result": validation_result is not None,
        }

        # Get template sections for the scenario
        sections_to_generate = self.scenario_templates.get(
            scenario, self.scenario_templates["standard"]
        )

        # Generate each section
        for order, section_name in enumerate(sections_to_generate, 1):
            if section_name in self.template_sections:
                section = self.template_sections[section_name](
                    order,
                    audience,
                    analysis_result,
                    migration_plan,
                    migration_result,
                    performance_report,
                    validation_result,
                )
                if section:  # Only add non-empty sections
                    guide.add_section(section)

        return guide

    def _generate_overview_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> DocumentationSection:
        """Generate overview section."""
        content = []
        content.append("# Migration Overview")
        content.append("")
        content.append(
            "This guide provides comprehensive instructions for migrating to the enhanced LocalRuntime."
        )
        content.append(
            "The enhanced LocalRuntime offers improved performance, enterprise features, and better"
        )
        content.append("resource management while maintaining backward compatibility.")
        content.append("")

        # Add scenario-specific information
        if analysis_result:
            complexity = analysis_result.migration_complexity
            effort_days = analysis_result.estimated_effort_days

            content.append("## Migration Complexity Assessment")
            content.append("")
            content.append(f"- **Complexity Level**: {complexity.title()}")
            content.append(f"- **Estimated Effort**: {effort_days} days")
            content.append(
                f"- **Files to Modify**: {analysis_result.total_files_analyzed}"
            )
            content.append(
                f"- **Issues Identified**: {analysis_result.summary.get('total_issues', 0)}"
            )
            content.append("")

        # Add performance overview
        if performance_report:
            overall_change = performance_report.overall_change_percentage
            status = (
                "improvement"
                if performance_report.overall_improvement
                else "regression"
            )

            content.append("## Performance Impact")
            content.append("")
            content.append(f"- **Overall Performance Change**: {overall_change:+.1f}%")
            content.append(f"- **Performance Assessment**: {status.title()}")
            content.append(
                f"- **Risk Level**: {performance_report.risk_assessment.title()}"
            )
            content.append("")

        content.append("## Key Benefits")
        content.append("")
        content.append(
            "- **Enhanced Performance**: Optimized execution engine with better resource management"
        )
        content.append(
            "- **Enterprise Features**: Advanced monitoring, security, and audit capabilities"
        )
        content.append(
            "- **Improved Reliability**: Circuit breakers, retry policies, and error handling"
        )
        content.append(
            "- **Better Observability**: Comprehensive metrics and monitoring"
        )
        content.append(
            "- **Backward Compatibility**: Existing workflows continue to work with minimal changes"
        )

        return DocumentationSection(
            title="Overview",
            content="\n".join(content),
            order=order,
            audience=audience,
            importance="critical",
        )

    def _generate_prerequisites_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> DocumentationSection:
        """Generate prerequisites section."""
        content = []
        content.append("# Prerequisites")
        content.append("")
        content.append(
            "Before beginning the migration, ensure you have completed the following prerequisites:"
        )
        content.append("")

        content.append("## System Requirements")
        content.append("")
        content.append("- Python 3.8 or higher")
        content.append("- Kailash SDK version 0.9.15 or higher")
        content.append("- Sufficient system resources (RAM: 2GB+, CPU: 2 cores+)")
        content.append("- Network access for package installation")
        content.append("")

        content.append("## Preparation Steps")
        content.append("")
        content.append("1. **Backup Your Codebase**")
        content.append("   ```bash")
        content.append("   git checkout -b pre-migration-backup")
        content.append("   git push origin pre-migration-backup")
        content.append("   ```")
        content.append("")
        content.append("2. **Verify Current Installation**")
        content.append("   ```python")
        content.append("   import kailash")
        content.append("   print(kailash.__version__)")
        content.append("   ```")
        content.append("")
        content.append("3. **Run Existing Tests**")
        content.append("   ```bash")
        content.append("   python -m pytest tests/")
        content.append("   ```")
        content.append("")

        # Add specific prerequisites based on analysis
        if migration_plan and migration_plan.prerequisites:
            content.append("## Project-Specific Prerequisites")
            content.append("")
            for prereq in migration_plan.prerequisites:
                content.append(f"- {prereq}")
            content.append("")

        content.append("## Knowledge Requirements")
        content.append("")
        if audience == "developer":
            content.append("- Familiarity with Kailash SDK and workflow concepts")
            content.append("- Understanding of LocalRuntime configuration")
            content.append("- Basic knowledge of Python async/await patterns")
        elif audience == "admin":
            content.append("- System administration experience")
            content.append("- Understanding of resource management and monitoring")
            content.append("- Knowledge of security and compliance requirements")
        else:
            content.append(
                "- Basic understanding of the current Kailash implementation"
            )
            content.append("- Familiarity with workflow automation concepts")

        return DocumentationSection(
            title="Prerequisites",
            content="\n".join(content),
            order=order,
            audience=audience,
            importance="high",
        )

    def _generate_compatibility_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> Optional[DocumentationSection]:
        """Generate compatibility analysis section."""
        if not analysis_result:
            return None

        content = []
        content.append("# Compatibility Analysis")
        content.append("")
        content.append(
            "This section details the compatibility issues identified in your codebase"
        )
        content.append("and provides specific guidance for addressing them.")
        content.append("")

        # Summary
        content.append("## Summary")
        content.append("")
        content.append(
            f"- **Total Issues**: {analysis_result.summary.get('total_issues', 0)}"
        )
        content.append(
            f"- **Critical Issues**: {analysis_result.summary.get('critical_issues', 0)}"
        )
        content.append(
            f"- **Breaking Changes**: {analysis_result.summary.get('breaking_changes', 0)}"
        )
        content.append(
            f"- **Automated Fixes**: {analysis_result.summary.get('automated_fixes', 0)}"
        )
        content.append("")

        # Critical issues that must be addressed
        critical_issues = [
            i for i in analysis_result.issues if i.severity == IssueSeverity.CRITICAL
        ]
        if critical_issues:
            content.append("## Critical Issues (Must Fix)")
            content.append("")
            content.append(
                "These issues will prevent the migration from succeeding and must be resolved:"
            )
            content.append("")

            for issue in critical_issues:
                content.append(f"### {issue.description}")
                content.append("")
                content.append(f"**File**: `{issue.file_path}:{issue.line_number}`")
                content.append("")
                if issue.code_snippet:
                    content.append("**Current Code**:")
                    content.append("```python")
                    content.append(issue.code_snippet)
                    content.append("```")
                    content.append("")
                content.append(f"**Solution**: {issue.recommendation}")
                content.append("")

        # Breaking changes
        breaking_changes = [i for i in analysis_result.issues if i.breaking_change]
        if breaking_changes:
            content.append("## Breaking Changes")
            content.append("")
            content.append("The following changes require code modifications:")
            content.append("")

            for issue in breaking_changes:
                content.append(f"- **{issue.description}**: {issue.recommendation}")
            content.append("")

        # Automated fixes
        auto_fixable = [i for i in analysis_result.issues if i.automated_fix]
        if auto_fixable:
            content.append("## Automated Fixes Available")
            content.append("")
            content.append(
                "These issues can be automatically resolved by the migration tool:"
            )
            content.append("")

            for issue in auto_fixable:
                content.append(f"- {issue.description}")
            content.append("")

        # Enterprise opportunities
        if analysis_result.enterprise_opportunities:
            content.append("## Enterprise Feature Opportunities")
            content.append("")
            content.append("Consider upgrading to these enterprise features:")
            content.append("")
            for opportunity in analysis_result.enterprise_opportunities:
                content.append(f"- {opportunity}")
            content.append("")

        return DocumentationSection(
            title="Compatibility Analysis",
            content="\n".join(content),
            order=order,
            audience=audience,
            importance="high",
        )

    def _generate_migration_steps_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> DocumentationSection:
        """Generate detailed migration steps section."""
        content = []
        content.append("# Migration Steps")
        content.append("")

        if migration_plan:
            content.append("## Automated Migration Plan")
            content.append("")
            content.append(
                f"**Estimated Duration**: {migration_plan.estimated_duration_minutes} minutes"
            )
            content.append(f"**Risk Level**: {migration_plan.risk_level.title()}")
            content.append("")

            content.append("### Step-by-Step Process")
            content.append("")

            for i, step in enumerate(migration_plan.steps, 1):
                content.append(f"{i}. **{step.description}**")
                content.append(f"   - File: `{step.file_path}`")
                content.append(f"   - Automated: {'Yes' if step.automated else 'No'}")
                if step.validation_required:
                    content.append(
                        "   - **Note**: Manual validation required after this step"
                    )
                content.append("")
        else:
            content.append("## Manual Migration Process")
            content.append("")

        content.append("### 1. Install Enhanced LocalRuntime")
        content.append("")
        content.append("```bash")
        content.append("pip install --upgrade kailash")
        content.append("```")
        content.append("")

        content.append("### 2. Update Import Statements")
        content.append("")
        content.append("Ensure you're importing from the correct modules:")
        content.append("")
        content.append("```python")
        content.append("from kailash.runtime.local import LocalRuntime")
        content.append("from kailash.workflow.builder import WorkflowBuilder")
        content.append("```")
        content.append("")

        content.append("### 3. Update LocalRuntime Configuration")
        content.append("")
        content.append("**Before (Legacy)**:")
        content.append("```python")
        content.append("runtime = LocalRuntime(")
        content.append("    enable_parallel=True,")
        content.append("    thread_pool_size=5,")
        content.append("    debug_mode=True")
        content.append(")")
        content.append("```")
        content.append("")
        content.append("**After (Enhanced)**:")
        content.append("```python")
        content.append("runtime = LocalRuntime(")
        content.append("    max_concurrency=5,")
        content.append("    debug=True,")
        content.append("    enable_monitoring=True")
        content.append(")")
        content.append("```")
        content.append("")

        content.append("### 4. Update Execution Patterns")
        content.append("")
        content.append("**Before**:")
        content.append("```python")
        content.append("runtime.execute_sync(workflow)")
        content.append("results = runtime.get_results()")
        content.append("```")
        content.append("")
        content.append("**After**:")
        content.append("```python")
        content.append("results, run_id = runtime.execute(workflow)")
        content.append("```")
        content.append("")

        content.append("### 5. Test Migration")
        content.append("")
        content.append("After making changes, test your workflows:")
        content.append("")
        content.append("```python")
        content.append("# Test basic functionality")
        content.append("from kailash.workflow.builder import WorkflowBuilder")
        content.append("from kailash.runtime.local import LocalRuntime")
        content.append("")
        content.append("workflow = WorkflowBuilder()")
        content.append("workflow.add_node('PythonCodeNode', 'test', {")
        content.append("    'code': 'result = \"Migration successful!\"',")
        content.append("    'output_key': 'message'")
        content.append("})")
        content.append("")
        content.append("runtime = LocalRuntime(debug=True)")
        content.append("results, run_id = runtime.execute(workflow.build())")
        content.append("print(results)")
        content.append("```")

        return DocumentationSection(
            title="Migration Steps",
            content="\n".join(content),
            order=order,
            audience=audience,
            importance="critical",
        )

    def _generate_configuration_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> DocumentationSection:
        """Generate configuration reference section."""
        content = []
        content.append("# Configuration Reference")
        content.append("")
        content.append(
            "This section provides comprehensive configuration options for the enhanced LocalRuntime."
        )
        content.append("")

        # Configuration validation results
        if validation_result:
            content.append("## Configuration Validation Results")
            content.append("")
            content.append(f"- **Valid**: {'Yes' if validation_result.valid else 'No'}")
            content.append(
                f"- **Security Score**: {validation_result.security_score}/100"
            )
            content.append(
                f"- **Performance Score**: {validation_result.performance_score}/100"
            )
            content.append(
                f"- **Enterprise Readiness**: {validation_result.enterprise_readiness}/100"
            )
            content.append("")

            if validation_result.optimized_config:
                content.append("### Optimized Configuration")
                content.append("")
                content.append(
                    "Based on validation results, here's an optimized configuration:"
                )
                content.append("")
                content.append("```python")
                content.append("runtime = LocalRuntime(")
                for key, value in validation_result.optimized_config.items():
                    if isinstance(value, str):
                        content.append(f'    {key}="{value}",')
                    else:
                        content.append(f"    {key}={value},")
                content.append(")")
                content.append("```")
                content.append("")

        content.append("## Core Parameters")
        content.append("")
        content.append("### Basic Configuration")
        content.append("")
        content.append("| Parameter | Type | Default | Description |")
        content.append("|-----------|------|---------|-------------|")
        content.append("| `debug` | bool | False | Enable debug logging |")
        content.append(
            "| `enable_cycles` | bool | True | Enable cyclic workflow support |"
        )
        content.append("| `enable_async` | bool | True | Enable async node execution |")
        content.append(
            "| `max_concurrency` | int | 10 | Maximum concurrent operations |"
        )
        content.append("")

        content.append("### Performance Configuration")
        content.append("")
        content.append("| Parameter | Type | Default | Description |")
        content.append("|-----------|------|---------|-------------|")
        content.append(
            "| `persistent_mode` | bool | False | Enable persistent resource mode |"
        )
        content.append(
            "| `enable_connection_sharing` | bool | True | Enable connection pooling |"
        )
        content.append(
            "| `max_concurrent_workflows` | int | 10 | Max concurrent workflows |"
        )
        content.append("| `connection_pool_size` | int | 20 | Connection pool size |")
        content.append("")

        content.append("### Enterprise Configuration")
        content.append("")
        content.append("| Parameter | Type | Default | Description |")
        content.append("|-----------|------|---------|-------------|")
        content.append(
            "| `enable_monitoring` | bool | True | Enable performance monitoring |"
        )
        content.append(
            "| `enable_security` | bool | False | Enable security features |"
        )
        content.append("| `enable_audit` | bool | False | Enable audit logging |")
        content.append(
            "| `user_context` | UserContext | None | User authentication context |"
        )
        content.append("")

        content.append("## Configuration Examples")
        content.append("")

        content.append("### Development Configuration")
        content.append("```python")
        content.append("runtime = LocalRuntime(")
        content.append("    debug=True,")
        content.append("    max_concurrency=2,")
        content.append("    enable_monitoring=True")
        content.append(")")
        content.append("```")
        content.append("")

        content.append("### Production Configuration")
        content.append("```python")
        content.append("runtime = LocalRuntime(")
        content.append("    debug=False,")
        content.append("    max_concurrency=20,")
        content.append("    persistent_mode=True,")
        content.append("    enable_monitoring=True,")
        content.append("    enable_security=True,")
        content.append("    resource_limits={")
        content.append("        'memory_mb': 2048,")
        content.append("        'timeout_seconds': 300")
        content.append("    }")
        content.append(")")
        content.append("```")
        content.append("")

        content.append("### Enterprise Configuration")
        content.append("```python")
        content.append("from kailash.access_control import UserContext")
        content.append("")
        content.append(
            "user_context = UserContext(user_id='user123', roles=['analyst'])"
        )
        content.append("")
        content.append("runtime = LocalRuntime(")
        content.append("    max_concurrency=50,")
        content.append("    persistent_mode=True,")
        content.append("    enable_monitoring=True,")
        content.append("    enable_security=True,")
        content.append("    enable_audit=True,")
        content.append("    enable_enterprise_monitoring=True,")
        content.append("    user_context=user_context,")
        content.append("    circuit_breaker_config={")
        content.append("        'failure_threshold': 5,")
        content.append("        'recovery_timeout': 60")
        content.append("    }")
        content.append(")")
        content.append("```")

        return DocumentationSection(
            title="Configuration Reference",
            content="\n".join(content),
            order=order,
            audience=audience,
            importance="high",
        )

    def _generate_performance_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> Optional[DocumentationSection]:
        """Generate performance analysis section."""
        if not performance_report:
            return None

        content = []
        content.append("# Performance Analysis")
        content.append("")
        content.append(
            "This section details the performance impact of migrating to the enhanced LocalRuntime."
        )
        content.append("")

        # Executive summary
        content.append("## Performance Summary")
        content.append("")
        content.append(
            f"- **Overall Performance Change**: {performance_report.overall_change_percentage:+.1f}%"
        )
        content.append(
            f"- **Performance Status**: {'Improvement' if performance_report.overall_improvement else 'Regression'}"
        )
        content.append(
            f"- **Risk Assessment**: {performance_report.risk_assessment.title()}"
        )
        content.append("")

        # Detailed metrics
        content.append("## Detailed Performance Metrics")
        content.append("")
        content.append("| Metric | Before | After | Change | Status |")
        content.append("|--------|---------|--------|---------|---------|")

        for comparison in performance_report.comparisons:
            status = "✅ Better" if comparison.improvement else "❌ Worse"
            if comparison.significance == "negligible":
                status = "➡️ Same"

            content.append(
                f"| {comparison.metric_name.replace('_', ' ').title()} | "
                f"{comparison.before_value:.2f} {comparison.unit} | "
                f"{comparison.after_value:.2f} {comparison.unit} | "
                f"{comparison.change_percentage:+.1f}% | "
                f"{status} |"
            )
        content.append("")

        # Performance recommendations
        if performance_report.recommendations:
            content.append("## Performance Recommendations")
            content.append("")
            for i, rec in enumerate(performance_report.recommendations, 1):
                content.append(f"{i}. {rec}")
            content.append("")

        # Optimization tips
        content.append("## Performance Optimization Tips")
        content.append("")
        content.append("### General Optimizations")
        content.append(
            "- **Connection Pooling**: Enable `enable_connection_sharing=True`"
        )
        content.append(
            "- **Persistent Mode**: Use `persistent_mode=True` for long-running applications"
        )
        content.append(
            "- **Concurrency Tuning**: Adjust `max_concurrency` based on your workload"
        )
        content.append(
            "- **Resource Limits**: Set appropriate `resource_limits` to prevent resource exhaustion"
        )
        content.append("")

        content.append("### Monitoring Performance")
        content.append("```python")
        content.append("# Enable performance monitoring")
        content.append("runtime = LocalRuntime(")
        content.append("    enable_monitoring=True,")
        content.append("    enable_enterprise_monitoring=True")
        content.append(")")
        content.append("")
        content.append("# Access performance metrics")
        content.append("results, run_id = runtime.execute(workflow)")
        content.append(
            "# Metrics are automatically collected and available via monitoring nodes"
        )
        content.append("```")

        return DocumentationSection(
            title="Performance Analysis",
            content="\n".join(content),
            order=order,
            audience=audience,
            importance="high",
        )

    def _generate_validation_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> DocumentationSection:
        """Generate validation and testing section."""
        content = []
        content.append("# Validation and Testing")
        content.append("")
        content.append(
            "This section provides comprehensive testing strategies to validate your migration."
        )
        content.append("")

        # Migration validation results
        if migration_result:
            content.append("## Migration Results")
            content.append("")
            content.append(
                f"- **Migration Success**: {'Yes' if migration_result.success else 'No'}"
            )
            content.append(f"- **Steps Completed**: {migration_result.steps_completed}")
            content.append(f"- **Steps Failed**: {migration_result.steps_failed}")

            if migration_result.backup_path:
                content.append(
                    f"- **Backup Location**: `{migration_result.backup_path}`"
                )

            if migration_result.errors:
                content.append("")
                content.append("### Migration Errors")
                for error in migration_result.errors:
                    content.append(f"- {error}")

            content.append("")

        content.append("## Validation Checklist")
        content.append("")
        content.append("### Basic Functionality")
        content.append("- [ ] LocalRuntime imports successfully")
        content.append("- [ ] Basic workflow executes without errors")
        content.append("- [ ] Results are returned correctly")
        content.append("- [ ] No syntax errors in modified files")
        content.append("")

        content.append("### Integration Testing")
        content.append("- [ ] All existing tests pass")
        content.append("- [ ] Workflow parameters work correctly")
        content.append("- [ ] Node connections function properly")
        content.append("- [ ] Error handling works as expected")
        content.append("")

        content.append("### Performance Testing")
        content.append("- [ ] Execution times are acceptable")
        content.append("- [ ] Memory usage is within limits")
        content.append("- [ ] Concurrent execution works properly")
        content.append("- [ ] Resource cleanup functions correctly")
        content.append("")

        # Test scripts
        content.append("## Test Scripts")
        content.append("")

        content.append("### Basic Validation Test")
        content.append("```python")
        content.append("#!/usr/bin/env python3")
        content.append('"""Basic validation test for LocalRuntime migration."""')
        content.append("")
        content.append("from kailash.workflow.builder import WorkflowBuilder")
        content.append("from kailash.runtime.local import LocalRuntime")
        content.append("")
        content.append("def test_basic_functionality():")
        content.append('    """Test basic LocalRuntime functionality."""')
        content.append("    # Create a simple workflow")
        content.append("    workflow = WorkflowBuilder()")
        content.append("    workflow.add_node('PythonCodeNode', 'test_node', {")
        content.append("        'code': 'result = 42',")
        content.append("        'output_key': 'answer'")
        content.append("    })")
        content.append("")
        content.append("    # Execute with enhanced runtime")
        content.append("    runtime = LocalRuntime(debug=True)")
        content.append("    results, run_id = runtime.execute(workflow.build())")
        content.append("")
        content.append("    # Validate results")
        content.append("    assert results is not None")
        content.append("    assert 'test_node' in results")
        content.append("    assert results['test_node']['answer'] == 42")
        content.append('    print("✅ Basic functionality test passed")')
        content.append("")
        content.append("if __name__ == '__main__':")
        content.append("    test_basic_functionality()")
        content.append("```")
        content.append("")

        content.append("### Performance Validation Test")
        content.append("```python")
        content.append("#!/usr/bin/env python3")
        content.append('"""Performance validation test for LocalRuntime migration."""')
        content.append("")
        content.append("import time")
        content.append("from kailash.workflow.builder import WorkflowBuilder")
        content.append("from kailash.runtime.local import LocalRuntime")
        content.append("")
        content.append("def test_performance():")
        content.append('    """Test performance characteristics."""')
        content.append("    workflow = WorkflowBuilder()")
        content.append("    workflow.add_node('PythonCodeNode', 'perf_test', {")
        content.append("        'code': 'result = sum(range(10000))',")
        content.append("        'output_key': 'sum_result'")
        content.append("    })")
        content.append("")
        content.append("    runtime = LocalRuntime(")
        content.append("        max_concurrency=5,")
        content.append("        enable_monitoring=True")
        content.append("    )")
        content.append("")
        content.append("    # Time the execution")
        content.append("    start_time = time.time()")
        content.append("    results, run_id = runtime.execute(workflow.build())")
        content.append("    execution_time = time.time() - start_time")
        content.append("")
        content.append("    print(f'Execution time: {execution_time:.2f} seconds')")
        content.append(
            "    assert execution_time < 5.0 # Should complete in under 5 seconds"
        )
        content.append('    print("✅ Performance test passed")')
        content.append("")
        content.append("if __name__ == '__main__':")
        content.append("    test_performance()")
        content.append("```")

        return DocumentationSection(
            title="Validation and Testing",
            content="\n".join(content),
            order=order,
            audience=audience,
            importance="high",
        )

    def _generate_troubleshooting_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> DocumentationSection:
        """Generate troubleshooting section."""
        content = []
        content.append("# Troubleshooting")
        content.append("")
        content.append("This section provides solutions to common migration issues.")
        content.append("")

        content.append("## Common Issues")
        content.append("")

        content.append("### ImportError: LocalRuntime Not Found")
        content.append("")
        content.append("**Problem**: Import errors when trying to use LocalRuntime")
        content.append("")
        content.append("**Solution**:")
        content.append("```bash")
        content.append("# Ensure latest version is installed")
        content.append("pip install --upgrade kailash")
        content.append("")
        content.append("# Verify installation")
        content.append(
            "python -c \"from kailash.runtime.local import LocalRuntime; print('Success')\""
        )
        content.append("```")
        content.append("")

        content.append("### Configuration Parameter Errors")
        content.append("")
        content.append("**Problem**: Errors about unknown or deprecated parameters")
        content.append("")
        content.append(
            "**Solution**: Update parameter names according to the migration guide:"
        )
        content.append("- `enable_parallel` → `max_concurrency`")
        content.append("- `thread_pool_size` → `max_concurrency`")
        content.append("- `debug_mode` → `debug`")
        content.append("- `memory_limit` → `resource_limits`")
        content.append("")

        content.append("### Execution Method Errors")
        content.append("")
        content.append("**Problem**: Methods like `execute_sync()` not found")
        content.append("")
        content.append("**Solution**: Use the unified `execute()` method:")
        content.append("```python")
        content.append("# Old way")
        content.append("# runtime.execute_sync(workflow)")
        content.append("# results = runtime.get_results()")
        content.append("")
        content.append("# New way")
        content.append("results, run_id = runtime.execute(workflow)")
        content.append("```")
        content.append("")

        content.append("### Performance Issues")
        content.append("")
        content.append("**Problem**: Slower execution after migration")
        content.append("")
        content.append("**Solutions**:")
        content.append("1. **Adjust Concurrency Settings**:")
        content.append("   ```python")
        content.append(
            "   runtime = LocalRuntime(max_concurrency=20) # Increase if needed"
        )
        content.append("   ```")
        content.append("")
        content.append("2. **Enable Connection Pooling**:")
        content.append("   ```python")
        content.append("   runtime = LocalRuntime(")
        content.append("       enable_connection_sharing=True,")
        content.append("       persistent_mode=True")
        content.append("   )")
        content.append("   ```")
        content.append("")

        content.append("### Memory Issues")
        content.append("")
        content.append("**Problem**: High memory usage or out-of-memory errors")
        content.append("")
        content.append("**Solution**: Configure resource limits:")
        content.append("```python")
        content.append("runtime = LocalRuntime(")
        content.append("    resource_limits={")
        content.append("        'memory_mb': 1024, # Limit to 1GB")
        content.append("        'timeout_seconds': 300")
        content.append("    }")
        content.append(")")
        content.append("```")
        content.append("")

        content.append("### Security Conflicts")
        content.append("")
        content.append("**Problem**: Security-related errors or warnings")
        content.append("")
        content.append("**Solution**: Properly configure security features:")
        content.append("```python")
        content.append("from kailash.access_control import UserContext")
        content.append("")
        content.append("user_context = UserContext(user_id='user123')")
        content.append("runtime = LocalRuntime(")
        content.append("    enable_security=True,")
        content.append("    user_context=user_context,")
        content.append("    debug=False # Don't enable debug with security")
        content.append(")")
        content.append("```")
        content.append("")

        # Add specific troubleshooting based on results
        if migration_result and migration_result.errors:
            content.append("## Project-Specific Issues")
            content.append("")
            for error in migration_result.errors:
                content.append(f"- **Error**: {error}")
                content.append(
                    "  **Solution**: Review the specific error and check configuration parameters"
                )
            content.append("")

        content.append("## Getting Help")
        content.append("")
        content.append("If you continue to experience issues:")
        content.append("")
        content.append(
            "1. **Check Documentation**: Review the complete Kailash SDK documentation"
        )
        content.append("2. **Enable Debug Logging**:")
        content.append("   ```python")
        content.append("   import logging")
        content.append("   logging.basicConfig(level=logging.DEBUG)")
        content.append("   runtime = LocalRuntime(debug=True)")
        content.append("   ```")
        content.append(
            "3. **Create Minimal Reproduction**: Isolate the issue in a simple test case"
        )
        content.append(
            "4. **Contact Support**: Provide debug logs and configuration details"
        )

        return DocumentationSection(
            title="Troubleshooting",
            content="\n".join(content),
            order=order,
            audience=audience,
            importance="high",
        )

    def _generate_best_practices_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> DocumentationSection:
        """Generate best practices section."""
        content = []
        content.append("# Best Practices")
        content.append("")
        content.append(
            "This section outlines best practices for using the enhanced LocalRuntime effectively."
        )
        content.append("")

        content.append("## Configuration Best Practices")
        content.append("")
        content.append("### Environment-Specific Configuration")
        content.append("")
        content.append("**Development**:")
        content.append("```python")
        content.append("runtime = LocalRuntime(")
        content.append("    debug=True,")
        content.append("    max_concurrency=2, # Lower for easier debugging")
        content.append("    enable_monitoring=True")
        content.append(")")
        content.append("```")
        content.append("")
        content.append("**Production**:")
        content.append("```python")
        content.append("runtime = LocalRuntime(")
        content.append("    debug=False,")
        content.append("    max_concurrency=20,")
        content.append("    persistent_mode=True,")
        content.append("    enable_monitoring=True,")
        content.append("    enable_security=True")
        content.append(")")
        content.append("```")
        content.append("")

        content.append("### Resource Management")
        content.append("")
        content.append(
            "- **Set Resource Limits**: Always configure appropriate resource limits"
        )
        content.append(
            "- **Monitor Memory Usage**: Use monitoring features to track resource consumption"
        )
        content.append(
            "- **Connection Pooling**: Enable connection sharing for better performance"
        )
        content.append("- **Cleanup**: Properly dispose of resources when done")
        content.append("")

        content.append("## Security Best Practices")
        content.append("")
        content.append("### Authentication and Authorization")
        content.append("```python")
        content.append("from kailash.access_control import UserContext")
        content.append("")
        content.append("# Always use proper user context in production")
        content.append("user_context = UserContext(")
        content.append("    user_id='authenticated_user',")
        content.append("    roles=['workflow_executor'],")
        content.append("    permissions=['execute', 'read']")
        content.append(")")
        content.append("")
        content.append("runtime = LocalRuntime(")
        content.append("    enable_security=True,")
        content.append("    enable_audit=True,")
        content.append("    user_context=user_context")
        content.append(")")
        content.append("```")
        content.append("")

        content.append("### Audit and Compliance")
        content.append("- **Enable Audit Logging**: Track all workflow executions")
        content.append("- **Secure Credentials**: Use proper secret management")
        content.append("- **Access Control**: Implement role-based access control")
        content.append("- **Monitoring**: Monitor for security events and anomalies")
        content.append("")

        content.append("## Performance Best Practices")
        content.append("")
        content.append("### Concurrency Optimization")
        content.append("```python")
        content.append("# Optimize based on your workload")
        content.append("runtime = LocalRuntime(")
        content.append("    max_concurrency=min(cpu_count() * 2, 50),")
        content.append("    max_concurrent_workflows=10,")
        content.append("    connection_pool_size=100")
        content.append(")")
        content.append("```")
        content.append("")

        content.append("### Monitoring and Observability")
        content.append("- **Enable Monitoring**: Use built-in performance monitoring")
        content.append(
            "- **Collect Metrics**: Implement comprehensive metrics collection"
        )
        content.append("- **Set Alerts**: Configure alerts for performance thresholds")
        content.append("- **Regular Reviews**: Periodically review performance metrics")
        content.append("")

        content.append("## Enterprise Features")
        content.append("")
        content.append("### Advanced Monitoring")
        content.append("```python")
        content.append("runtime = LocalRuntime(")
        content.append("    enable_monitoring=True,")
        content.append("    enable_enterprise_monitoring=True,")
        content.append("    enable_health_monitoring=True")
        content.append(")")
        content.append("```")
        content.append("")

        content.append("### Resilience and Reliability")
        content.append("```python")
        content.append("runtime = LocalRuntime(")
        content.append("    circuit_breaker_config={")
        content.append("        'failure_threshold': 5,")
        content.append("        'recovery_timeout': 60")
        content.append("    },")
        content.append("    retry_policy_config={")
        content.append("        'max_retries': 3,")
        content.append("        'backoff_factor': 2.0")
        content.append("    }")
        content.append(")")
        content.append("```")
        content.append("")

        content.append("## Code Organization")
        content.append("")
        content.append("### Configuration Management")
        content.append("```python")
        content.append("# config.py")
        content.append("import os")
        content.append("from kailash.access_control import UserContext")
        content.append("")
        content.append("def get_runtime_config():")
        content.append("    env = os.getenv('ENVIRONMENT', 'development')")
        content.append(" ")
        content.append("    base_config = {")
        content.append("        'enable_monitoring': True,")
        content.append("        'enable_connection_sharing': True")
        content.append("    }")
        content.append(" ")
        content.append("    if env == 'production':")
        content.append("        base_config.update({")
        content.append("            'debug': False,")
        content.append("            'max_concurrency': 20,")
        content.append("            'enable_security': True,")
        content.append("            'persistent_mode': True")
        content.append("        })")
        content.append("    else:")
        content.append("        base_config.update({")
        content.append("            'debug': True,")
        content.append("            'max_concurrency': 2")
        content.append("        })")
        content.append(" ")
        content.append("    return base_config")
        content.append("```")

        return DocumentationSection(
            title="Best Practices",
            content="\n".join(content),
            order=order,
            audience=audience,
            importance="medium",
        )

    def _generate_rollback_section(
        self,
        order: int,
        audience: str,
        analysis_result: Optional[AnalysisResult],
        migration_plan: Optional[MigrationPlan],
        migration_result: Optional[MigrationResult],
        performance_report: Optional[PerformanceReport],
        validation_result: Optional[ValidationResult],
    ) -> DocumentationSection:
        """Generate rollback procedures section."""
        content = []
        content.append("# Rollback Procedures")
        content.append("")
        content.append(
            "This section provides procedures for rolling back the migration if issues occur."
        )
        content.append("")

        if migration_result and migration_result.backup_path:
            content.append("## Automated Rollback")
            content.append("")
            content.append(
                f"A backup was created during migration at: `{migration_result.backup_path}`"
            )
            content.append("")
            content.append("### Quick Rollback")
            content.append("```bash")
            content.append("# Use the migration assistant to rollback")
            content.append('python -c "')
            content.append("from kailash.migration import MigrationAssistant")
            content.append("assistant = MigrationAssistant()")
            content.append("# Rollback using stored results")
            content.append('"')
            content.append("```")
            content.append("")

        content.append("## Manual Rollback")
        content.append("")
        content.append("### Git-based Rollback")
        content.append("")
        content.append("If you created a Git backup branch:")
        content.append("")
        content.append("```bash")
        content.append("# Rollback to pre-migration state")
        content.append("git checkout pre-migration-backup")
        content.append("git checkout -b rollback-$(date +%Y%m%d)")
        content.append("git merge main # Resolve any conflicts")
        content.append("```")
        content.append("")

        content.append("### Package Rollback")
        content.append("")
        content.append("If you need to rollback to a previous SDK version:")
        content.append("")
        content.append("```bash")
        content.append("# Install specific version")
        content.append("pip install kailash==0.9.14 # Replace with desired version")
        content.append("")
        content.append("# Verify installation")
        content.append('python -c "import kailash; print(kailash.__version__)"')
        content.append("```")
        content.append("")

        content.append("## Rollback Checklist")
        content.append("")
        content.append("### Pre-Rollback")
        content.append("- [ ] Document the issues that prompted rollback")
        content.append("- [ ] Backup current state (even if problematic)")
        content.append("- [ ] Notify team members of rollback")
        content.append("- [ ] Prepare test plan for post-rollback validation")
        content.append("")

        content.append("### During Rollback")
        content.append("- [ ] Restore code to previous working state")
        content.append("- [ ] Restore configuration files")
        content.append("- [ ] Downgrade packages if necessary")
        content.append("- [ ] Clear any cached data or temporary files")
        content.append("")

        content.append("### Post-Rollback")
        content.append("- [ ] Run full test suite")
        content.append("- [ ] Verify all workflows execute correctly")
        content.append("- [ ] Check performance metrics")
        content.append("- [ ] Document lessons learned")
        content.append("- [ ] Plan remediation for next migration attempt")
        content.append("")

        content.append("## Prevention Strategies")
        content.append("")
        content.append("To avoid needing rollbacks in the future:")
|
1395
|
+
content.append("")
|
1396
|
+
content.append("### Staged Migration")
|
1397
|
+
content.append("1. **Develop**: Test migration on development environment")
|
1398
|
+
content.append("2. **Staging**: Full migration test on staging environment")
|
1399
|
+
content.append("3. **Canary**: Deploy to small subset of production")
|
1400
|
+
content.append("4. **Full Production**: Complete production deployment")
|
1401
|
+
content.append("")
|
1402
|
+
|
1403
|
+
content.append("### Monitoring and Alerts")
|
1404
|
+
content.append("- Set up monitoring for key metrics before migration")
|
1405
|
+
content.append("- Configure alerts for performance regressions")
|
1406
|
+
content.append("- Implement automated health checks")
|
1407
|
+
content.append("- Plan rollback triggers and thresholds")
|
1408
|
+
content.append("")
|
1409
|
+
|
1410
|
+
content.append("## Support and Recovery")
|
1411
|
+
content.append("")
|
1412
|
+
content.append("If rollback doesn't resolve all issues:")
|
1413
|
+
content.append("")
|
1414
|
+
content.append(
|
1415
|
+
"1. **Isolate the Problem**: Identify specific failing components"
|
1416
|
+
)
|
1417
|
+
content.append("2. **Minimal Reproduction**: Create simple test case")
|
1418
|
+
content.append(
|
1419
|
+
"3. **Documentation**: Gather logs, configurations, and error messages"
|
1420
|
+
)
|
1421
|
+
content.append(
|
1422
|
+
"4. **Expert Consultation**: Contact Kailash support with details"
|
1423
|
+
)
|
1424
|
+
|
1425
|
+
return DocumentationSection(
|
1426
|
+
title="Rollback Procedures",
|
1427
|
+
content="\n".join(content),
|
1428
|
+
order=order,
|
1429
|
+
audience=audience,
|
1430
|
+
importance="medium",
|
1431
|
+
)
|
1432
|
+
|
+    def _generate_enterprise_section(
+        self,
+        order: int,
+        audience: str,
+        analysis_result: Optional[AnalysisResult],
+        migration_plan: Optional[MigrationPlan],
+        migration_result: Optional[MigrationResult],
+        performance_report: Optional[PerformanceReport],
+        validation_result: Optional[ValidationResult],
+    ) -> DocumentationSection:
+        """Generate enterprise features section."""
+        content = []
+        content.append("# Enterprise Features Guide")
+        content.append("")
+        content.append(
+            "This section covers advanced enterprise features available in the enhanced LocalRuntime."
+        )
+        content.append("")
+
+        content.append("## Security and Access Control")
+        content.append("")
+        content.append("### User Authentication")
+        content.append("```python")
+        content.append("from kailash.access_control import UserContext")
+        content.append("")
+        content.append("# Create user context with roles and permissions")
+        content.append("user_context = UserContext(")
+        content.append("    user_id='john.doe@company.com',")
+        content.append("    roles=['data_analyst', 'workflow_admin'],")
+        content.append("    permissions=['execute', 'read', 'write'],")
+        content.append(
+            "    metadata={'department': 'analytics', 'clearance': 'confidential'}"
+        )
+        content.append(")")
+        content.append("")
+        content.append("runtime = LocalRuntime(")
+        content.append("    enable_security=True,")
+        content.append("    user_context=user_context")
+        content.append(")")
+        content.append("```")
+        content.append("")
+
+        content.append("### Audit Logging")
+        content.append("```python")
+        content.append("# Enable comprehensive audit logging")
+        content.append("runtime = LocalRuntime(")
+        content.append("    enable_security=True,")
+        content.append("    enable_audit=True,")
+        content.append("    user_context=user_context")
+        content.append(")")
+        content.append("")
+        content.append("# All workflow executions are now logged with:")
+        content.append("# - User identity and roles")
+        content.append("# - Execution timestamps")
+        content.append("# - Resource access patterns")
+        content.append("# - Security events and violations")
+        content.append("```")
+        content.append("")
+
+        content.append("## Advanced Monitoring")
+        content.append("")
+        content.append("### Enterprise Monitoring")
+        content.append("```python")
+        content.append("runtime = LocalRuntime(")
+        content.append("    enable_monitoring=True,")
+        content.append("    enable_enterprise_monitoring=True,")
+        content.append("    enable_health_monitoring=True")
+        content.append(")")
+        content.append("")
+        content.append("# Provides advanced metrics:")
+        content.append("# - Real-time performance analytics")
+        content.append("# - Resource utilization trends")
+        content.append("# - Predictive failure detection")
+        content.append("# - Business-level KPIs")
+        content.append("```")
+        content.append("")
+
+        content.append("### Custom Metrics")
+        content.append("```python")
+        content.append("from kailash.nodes.monitoring import MetricsCollectorNode")
+        content.append("")
+        content.append("# Add custom metrics collection to workflows")
+        content.append("workflow.add_node('MetricsCollectorNode', 'metrics', {")
+        content.append("    'metrics': {")
+        content.append("        'business_value': 'calculate_roi',")
+        content.append("        'data_quality': 'check_quality_score',")
+        content.append("        'processing_speed': 'measure_throughput'")
+        content.append("    }")
+        content.append("})")
+        content.append("```")
+        content.append("")
+
+        content.append("## Reliability and Resilience")
+        content.append("")
+        content.append("### Circuit Breaker Pattern")
+        content.append("```python")
+        content.append("runtime = LocalRuntime(")
+        content.append("    enable_resource_coordination=True,")
+        content.append("    circuit_breaker_config={")
+        content.append("        'failure_threshold': 5,    # Failures before opening")
+        content.append("        'recovery_timeout': 60,    # Seconds in open state")
+        content.append("        'success_threshold': 3,    # Successes to close")
+        content.append("        'timeout': 30              # Request timeout")
+        content.append("    }")
+        content.append(")")
+        content.append("```")
+        content.append("")
+
+        content.append("### Retry Policies")
+        content.append("```python")
+        content.append("runtime = LocalRuntime(")
+        content.append("    enable_resource_coordination=True,")
+        content.append("    retry_policy_config={")
+        content.append("        'max_retries': 3,")
+        content.append("        'backoff_factor': 2.0,    # Exponential backoff")
+        content.append("        'max_backoff': 300,       # Max wait time")
+        content.append("        'retry_on': ['timeout', 'connection_error']")
+        content.append("    }")
+        content.append(")")
+        content.append("```")
+        content.append("")
+
+        content.append("## Resource Management")
+        content.append("")
+        content.append("### Advanced Resource Limits")
+        content.append("```python")
+        content.append("runtime = LocalRuntime(")
+        content.append("    resource_limits={")
+        content.append("        'memory_mb': 4096,")
+        content.append("        'cpu_percent': 80,")
+        content.append("        'timeout_seconds': 1800,")
+        content.append("        'max_files_open': 1000,")
+        content.append("        'max_network_connections': 100")
+        content.append("    }")
+        content.append(")")
+        content.append("```")
+        content.append("")
+
+        content.append("### Connection Pool Management")
+        content.append("```python")
+        content.append("runtime = LocalRuntime(")
+        content.append("    persistent_mode=True,")
+        content.append("    enable_connection_sharing=True,")
+        content.append("    connection_pool_config={")
+        content.append("        'min_connections': 10,")
+        content.append("        'max_connections': 100,")
+        content.append("        'connection_timeout': 30,")
+        content.append("        'idle_timeout': 300,")
+        content.append("        'health_check_interval': 60")
+        content.append("    }")
+        content.append(")")
+        content.append("```")
+        content.append("")
+
+        content.append("## Integration Features")
+        content.append("")
+        content.append("### Custom Secret Providers")
+        content.append("```python")
+        content.append("from kailash.runtime.secret_provider import SecretProvider")
+        content.append("")
+        content.append("class EnterpriseSecretProvider(SecretProvider):")
+        content.append("    def get_secret(self, key: str) -> str:")
+        content.append("        # Integrate with enterprise secret management")
+        content.append("        return vault_client.get_secret(key)")
+        content.append("")
+        content.append("runtime = LocalRuntime(")
+        content.append("    secret_provider=EnterpriseSecretProvider()")
+        content.append(")")
+        content.append("```")
+        content.append("")
+
+        content.append("## Compliance and Governance")
+        content.append("")
+        content.append("### Data Governance")
+        content.append("```python")
+        content.append("from kailash.nodes.compliance import DataRetentionNode")
+        content.append("")
+        content.append("# Add data governance to workflows")
+        content.append("workflow.add_node('DataRetentionNode', 'governance', {")
+        content.append("    'retention_policy': 'gdpr',")
+        content.append("    'classification': 'sensitive',")
+        content.append("    'audit_trail': True")
+        content.append("})")
+        content.append("```")
+        content.append("")
+
+        content.append("### Regulatory Compliance")
+        content.append("```python")
+        content.append("runtime = LocalRuntime(")
+        content.append("    enable_security=True,")
+        content.append("    enable_audit=True,")
+        content.append("    compliance_mode='strict',  # GDPR, HIPAA, SOX compliance")
+        content.append("    data_classification_required=True")
+        content.append(")")
+        content.append("```")
+
+        return DocumentationSection(
+            title="Enterprise Features",
+            content="\n".join(content),
+            order=order,
+            audience=audience,
+            importance="medium",
+        )
+
+    def _generate_appendix_section(
+        self,
+        order: int,
+        audience: str,
+        analysis_result: Optional[AnalysisResult],
+        migration_plan: Optional[MigrationPlan],
+        migration_result: Optional[MigrationResult],
+        performance_report: Optional[PerformanceReport],
+        validation_result: Optional[ValidationResult],
+    ) -> DocumentationSection:
+        """Generate appendix section."""
+        content = []
+        content.append("# Appendix")
+        content.append("")
+
+        content.append("## Migration Tool Reference")
+        content.append("")
+        content.append("### Command Line Usage")
+        content.append("```bash")
+        content.append("# Analyze codebase for compatibility")
+        content.append('python -c "')
+        content.append("from kailash.migration import CompatibilityChecker")
+        content.append("checker = CompatibilityChecker()")
+        content.append("result = checker.analyze_codebase('/path/to/project')")
+        content.append("print(checker.generate_report(result))")
+        content.append('"')
+        content.append("")
+        content.append("# Generate migration plan")
+        content.append('python -c "')
+        content.append("from kailash.migration import MigrationAssistant")
+        content.append("assistant = MigrationAssistant()")
+        content.append("plan = assistant.create_migration_plan('/path/to/project')")
+        content.append("print(assistant.generate_migration_report(plan))")
+        content.append('"')
+        content.append("```")
+        content.append("")
+
+        content.append("## Parameter Migration Map")
+        content.append("")
+        content.append("| Legacy Parameter | New Parameter | Notes |")
+        content.append("|------------------|---------------|-------|")
+        content.append("| `enable_parallel` | `max_concurrency` | Boolean → Integer |")
+        content.append("| `thread_pool_size` | `max_concurrency` | Direct mapping |")
+        content.append("| `debug_mode` | `debug` | Parameter rename |")
+        content.append(
+            "| `memory_limit` | `resource_limits['memory_mb']` | Move to dict |"
+        )
+        content.append(
+            "| `timeout` | `resource_limits['timeout_seconds']` | Move to dict |"
+        )
+        content.append(
+            "| `retry_count` | `retry_policy_config['max_retries']` | Move to dict |"
+        )
+        content.append("| `log_level` | Use logging config | Removed |")
+        content.append("| `cache_enabled` | Use CacheNode | Use nodes instead |")
+        content.append("")
+
+        content.append("## Method Migration Map")
+        content.append("")
+        content.append("| Legacy Method | New Method | Notes |")
+        content.append("|---------------|------------|-------|")
+        content.append(
+            "| `execute_sync(workflow)` | `execute(workflow)` | Unified method |"
+        )
+        content.append(
+            "| `execute_async(workflow)` | `execute(workflow)` | Use `enable_async=True` |"
+        )
+        content.append("| `get_results()` | Return from `execute()` | Direct return |")
+        content.append(
+            "| `set_context(ctx)` | Constructor param | Use `user_context` |"
+        )
+        content.append(
+            "| `configure(config)` | Constructor params | Use named params |"
+        )
+        content.append("")
+
+        content.append("## Enterprise Feature Matrix")
+        content.append("")
+        content.append("| Feature | Parameter | Dependency | Description |")
+        content.append("|---------|-----------|------------|-------------|")
+        content.append(
+            "| Security | `enable_security=True` | `user_context` | Access control |"
+        )
+        content.append(
+            "| Audit | `enable_audit=True` | `enable_security` | Compliance logging |"
+        )
+        content.append(
+            "| Advanced Monitoring | `enable_enterprise_monitoring=True` | `enable_monitoring` | Business metrics |"
+        )
+        content.append(
+            "| Health Monitoring | `enable_health_monitoring=True` | None | System health |"
+        )
+        content.append(
+            "| Circuit Breaker | `circuit_breaker_config` | `enable_resource_coordination` | Resilience |"
+        )
+        content.append(
+            "| Retry Policy | `retry_policy_config` | `enable_resource_coordination` | Reliability |"
+        )
+        content.append("")
+
+        content.append("## Common Error Codes")
+        content.append("")
+        content.append("### Configuration Errors")
+        content.append("- `RUNTIME_CONFIG_001`: Unknown parameter")
+        content.append("- `RUNTIME_CONFIG_002`: Invalid parameter type")
+        content.append("- `RUNTIME_CONFIG_003`: Parameter value out of range")
+        content.append("- `RUNTIME_CONFIG_004`: Missing required dependency")
+        content.append("- `RUNTIME_CONFIG_005`: Parameter conflict")
+        content.append("")
+
+        content.append("### Execution Errors")
+        content.append("- `RUNTIME_EXEC_001`: Method not found")
+        content.append("- `RUNTIME_EXEC_002`: Invalid workflow format")
+        content.append("- `RUNTIME_EXEC_003`: Resource limit exceeded")
+        content.append("- `RUNTIME_EXEC_004`: Security violation")
+        content.append("- `RUNTIME_EXEC_005`: Timeout exceeded")
+        content.append("")
+
+        content.append("## Support Resources")
+        content.append("")
+        content.append("- **Documentation**: https://kailash-sdk.readthedocs.io/")
+        content.append("- **API Reference**: https://api.kailash-sdk.com/")
+        content.append("- **Migration Tools**: `kailash.migration` module")
+        content.append("- **Community Forum**: https://community.kailash-sdk.com/")
+        content.append("- **Issue Tracker**: https://github.com/kailash-sdk/issues/")
+
+        return DocumentationSection(
+            title="Appendix",
+            content="\n".join(content),
+            order=order,
+            audience=audience,
+            importance="low",
+        )
+
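The parameter map and feature matrix emitted above translate directly into constructor changes. A hedged before/after sketch (the `LocalRuntime` import path from `kailash.runtime.local` and the concurrency value are assumptions, not part of the diff):

```python
from kailash.runtime.local import LocalRuntime  # assumed import path

# Legacy style (pre-migration):
#   LocalRuntime(enable_parallel=True, debug_mode=True,
#                memory_limit=2048, timeout=600, retry_count=3)

# Equivalent configuration using the new parameter names from the map above.
runtime = LocalRuntime(
    max_concurrency=4,  # replaces enable_parallel / thread_pool_size (boolean -> integer)
    debug=True,  # replaces debug_mode
    resource_limits={
        "memory_mb": 2048,  # replaces memory_limit
        "timeout_seconds": 600,  # replaces timeout
    },
    enable_resource_coordination=True,  # dependency of retry_policy_config per the matrix
    retry_policy_config={"max_retries": 3},  # replaces retry_count
)
```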
+    def export_guide(
+        self,
+        guide: MigrationGuide,
+        file_path: Union[str, Path],
+        format: str = "markdown",
+    ) -> None:
+        """Export migration guide to file.
+
+        Args:
+            guide: Migration guide to export
+            file_path: Output file path
+            format: Export format ("markdown", "html", "pdf")
+        """
+        file_path = Path(file_path)
+
+        if format == "markdown":
+            content = self._export_markdown(guide)
+        elif format == "html":
+            content = self._export_html(guide)
+        else:
+            content = self._export_markdown(guide)  # Default to markdown
+
+        with open(file_path, "w", encoding="utf-8") as f:
+            f.write(content)
+
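A short usage sketch for `export_guide`; here `generator` and `guide` are hypothetical placeholders for an instance of the generator class in this module and a `MigrationGuide` it has already produced:

```python
from pathlib import Path

out_dir = Path("docs/migration")
out_dir.mkdir(parents=True, exist_ok=True)

# "markdown" is the default; unrecognized formats also fall back to markdown.
generator.export_guide(guide, out_dir / "MIGRATION_GUIDE.md", format="markdown")
generator.export_guide(guide, out_dir / "migration_guide.html", format="html")
```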
+    def _export_markdown(self, guide: MigrationGuide) -> str:
+        """Export guide as markdown."""
+        content = []
+
+        # Title and metadata
+        content.append(f"# {guide.title}")
+        content.append("")
+        content.append(
+            f"*Generated: {guide.generated_at.strftime('%Y-%m-%d %H:%M:%S')}*"
+        )
+        content.append("")
+
+        # Table of contents
+        content.append("## Table of Contents")
+        content.append("")
+        for section in guide.sections:
+            content.append(
+                f"- [{section.title}](#{section.title.lower().replace(' ', '-')})"
+            )
+        content.append("")
+
+        # Sections
+        for section in guide.sections:
+            content.append(section.content)
+            content.append("")
+
+        return "\n".join(content)
+
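The table-of-contents entries above use a simple GitHub-style slug: lowercase the section title and replace spaces with dashes. For example:

```python
title = "Rollback Procedures"
anchor = title.lower().replace(" ", "-")
print(f"- [{title}](#{anchor})")  # -> - [Rollback Procedures](#rollback-procedures)
```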
+    def _export_html(self, guide: MigrationGuide) -> str:
+        """Export guide as HTML."""
+        # This would require a markdown-to-HTML converter
+        # For now, return markdown format
+        return self._export_markdown(guide)
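As the comments note, `_export_html` currently falls back to the markdown output. A minimal sketch of what a real conversion could look like, assuming the third-party `markdown` package were acceptable as a dependency:

```python
import markdown  # pip install markdown

def render_guide_html(markdown_text: str) -> str:
    """Convert exported markdown text into a standalone HTML page."""
    body = markdown.markdown(markdown_text, extensions=["tables", "fenced_code"])
    return f"<!DOCTYPE html>\n<html><body>\n{body}\n</body></html>"
```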