kailash 0.8.3__py3-none-any.whl → 0.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -7
- kailash/cli/__init__.py +11 -1
- kailash/cli/validation_audit.py +570 -0
- kailash/core/actors/supervisor.py +1 -1
- kailash/core/resilience/circuit_breaker.py +71 -1
- kailash/core/resilience/health_monitor.py +172 -0
- kailash/edge/compliance.py +33 -0
- kailash/edge/consistency.py +609 -0
- kailash/edge/coordination/__init__.py +30 -0
- kailash/edge/coordination/global_ordering.py +355 -0
- kailash/edge/coordination/leader_election.py +217 -0
- kailash/edge/coordination/partition_detector.py +296 -0
- kailash/edge/coordination/raft.py +485 -0
- kailash/edge/discovery.py +63 -1
- kailash/edge/migration/__init__.py +19 -0
- kailash/edge/migration/edge_migrator.py +832 -0
- kailash/edge/monitoring/__init__.py +21 -0
- kailash/edge/monitoring/edge_monitor.py +736 -0
- kailash/edge/prediction/__init__.py +10 -0
- kailash/edge/prediction/predictive_warmer.py +591 -0
- kailash/edge/resource/__init__.py +102 -0
- kailash/edge/resource/cloud_integration.py +796 -0
- kailash/edge/resource/cost_optimizer.py +949 -0
- kailash/edge/resource/docker_integration.py +919 -0
- kailash/edge/resource/kubernetes_integration.py +893 -0
- kailash/edge/resource/platform_integration.py +913 -0
- kailash/edge/resource/predictive_scaler.py +959 -0
- kailash/edge/resource/resource_analyzer.py +824 -0
- kailash/edge/resource/resource_pools.py +610 -0
- kailash/integrations/dataflow_edge.py +261 -0
- kailash/mcp_server/registry_integration.py +1 -1
- kailash/monitoring/__init__.py +18 -0
- kailash/monitoring/alerts.py +646 -0
- kailash/monitoring/metrics.py +677 -0
- kailash/nodes/__init__.py +2 -0
- kailash/nodes/ai/__init__.py +17 -0
- kailash/nodes/ai/a2a.py +1914 -43
- kailash/nodes/ai/a2a_backup.py +1807 -0
- kailash/nodes/ai/hybrid_search.py +972 -0
- kailash/nodes/ai/semantic_memory.py +558 -0
- kailash/nodes/ai/streaming_analytics.py +947 -0
- kailash/nodes/base.py +545 -0
- kailash/nodes/edge/__init__.py +36 -0
- kailash/nodes/edge/base.py +240 -0
- kailash/nodes/edge/cloud_node.py +710 -0
- kailash/nodes/edge/coordination.py +239 -0
- kailash/nodes/edge/docker_node.py +825 -0
- kailash/nodes/edge/edge_data.py +582 -0
- kailash/nodes/edge/edge_migration_node.py +392 -0
- kailash/nodes/edge/edge_monitoring_node.py +421 -0
- kailash/nodes/edge/edge_state.py +673 -0
- kailash/nodes/edge/edge_warming_node.py +393 -0
- kailash/nodes/edge/kubernetes_node.py +652 -0
- kailash/nodes/edge/platform_node.py +766 -0
- kailash/nodes/edge/resource_analyzer_node.py +378 -0
- kailash/nodes/edge/resource_optimizer_node.py +501 -0
- kailash/nodes/edge/resource_scaler_node.py +397 -0
- kailash/nodes/ports.py +676 -0
- kailash/runtime/local.py +344 -1
- kailash/runtime/validation/__init__.py +20 -0
- kailash/runtime/validation/connection_context.py +119 -0
- kailash/runtime/validation/enhanced_error_formatter.py +202 -0
- kailash/runtime/validation/error_categorizer.py +164 -0
- kailash/runtime/validation/metrics.py +380 -0
- kailash/runtime/validation/performance.py +615 -0
- kailash/runtime/validation/suggestion_engine.py +212 -0
- kailash/testing/fixtures.py +2 -2
- kailash/workflow/builder.py +234 -8
- kailash/workflow/contracts.py +418 -0
- kailash/workflow/edge_infrastructure.py +369 -0
- kailash/workflow/migration.py +3 -3
- kailash/workflow/type_inference.py +669 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/METADATA +44 -27
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/RECORD +78 -28
- kailash/nexus/__init__.py +0 -21
- kailash/nexus/cli/__init__.py +0 -5
- kailash/nexus/cli/__main__.py +0 -6
- kailash/nexus/cli/main.py +0 -176
- kailash/nexus/factory.py +0 -413
- kailash/nexus/gateway.py +0 -545
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/WHEEL +0 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/entry_points.txt +0 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/top_level.txt +0 -0
kailash/nodes/edge/edge_migration_node.py
@@ -0,0 +1,392 @@
+"""Edge migration node for live workload migration between edge nodes.
+
+This node integrates edge migration capabilities into workflows,
+enabling zero-downtime migration of workloads and data.
+"""
+
+import asyncio
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+
+from kailash.edge.migration.edge_migrator import (
+    EdgeMigrator,
+    MigrationPhase,
+    MigrationPlan,
+    MigrationProgress,
+    MigrationStrategy,
+)
+from kailash.nodes.base import NodeParameter, register_node
+from kailash.nodes.base_async import AsyncNode
+
+
+@register_node()
+class EdgeMigrationNode(AsyncNode):
+    """Node for edge migration operations.
+
+    This node provides capabilities for planning and executing live migrations
+    of workloads between edge nodes with minimal downtime.
+
+    Example:
+        >>> # Plan a migration
+        >>> result = await migration_node.execute_async(
+        ...     operation="plan_migration",
+        ...     source_edge="edge-west-1",
+        ...     target_edge="edge-east-1",
+        ...     workloads=["api-service", "cache-layer"],
+        ...     strategy="live"
+        ... )
+
+        >>> # Execute the migration
+        >>> result = await migration_node.execute_async(
+        ...     operation="execute_migration",
+        ...     migration_id=result["plan"]["migration_id"]
+        ... )
+
+        >>> # Check progress
+        >>> result = await migration_node.execute_async(
+        ...     operation="get_progress",
+        ...     migration_id="migration_123"
+        ... )
+
+        >>> # Rollback if needed
+        >>> result = await migration_node.execute_async(
+        ...     operation="rollback_migration",
+        ...     migration_id="migration_123"
+        ... )
+    """
+
+    def __init__(self, **kwargs):
+        """Initialize edge migration node."""
+        super().__init__(**kwargs)
+
+        # Extract configuration
+        checkpoint_interval = kwargs.get("checkpoint_interval", 60)
+        sync_batch_size = kwargs.get("sync_batch_size", 1000)
+        bandwidth_limit_mbps = kwargs.get("bandwidth_limit_mbps")
+        enable_compression = kwargs.get("enable_compression", True)
+
+        # Initialize migrator
+        self.migrator = EdgeMigrator(
+            checkpoint_interval=checkpoint_interval,
+            sync_batch_size=sync_batch_size,
+            bandwidth_limit_mbps=bandwidth_limit_mbps,
+            enable_compression=enable_compression,
+        )
+
+        self._migrator_started = False
+
+    @property
+    def input_parameters(self) -> Dict[str, NodeParameter]:
+        """Define input parameters."""
+        return {
+            "operation": NodeParameter(
+                name="operation",
+                type=str,
+                required=True,
+                description="Operation to perform (plan_migration, execute_migration, get_progress, pause_migration, resume_migration, rollback_migration, get_active_migrations, get_history, get_metrics, start_migrator, stop_migrator)",
+            ),
+            # For plan_migration
+            "source_edge": NodeParameter(
+                name="source_edge",
+                type=str,
+                required=False,
+                description="Source edge node",
+            ),
+            "target_edge": NodeParameter(
+                name="target_edge",
+                type=str,
+                required=False,
+                description="Target edge node",
+            ),
+            "workloads": NodeParameter(
+                name="workloads",
+                type=list,
+                required=False,
+                description="List of workloads to migrate",
+            ),
+            "strategy": NodeParameter(
+                name="strategy",
+                type=str,
+                required=False,
+                default="live",
+                description="Migration strategy (live, staged, bulk, incremental, emergency)",
+            ),
+            "constraints": NodeParameter(
+                name="constraints",
+                type=dict,
+                required=False,
+                default={},
+                description="Migration constraints (time_window, bandwidth, etc.)",
+            ),
+            "priority": NodeParameter(
+                name="priority",
+                type=int,
+                required=False,
+                default=5,
+                description="Migration priority (1-10)",
+            ),
+            # For other operations
+            "migration_id": NodeParameter(
+                name="migration_id",
+                type=str,
+                required=False,
+                description="Migration identifier",
+            ),
+            "checkpoint_id": NodeParameter(
+                name="checkpoint_id",
+                type=str,
+                required=False,
+                description="Checkpoint identifier for rollback",
+            ),
+            # Configuration
+            "checkpoint_interval": NodeParameter(
+                name="checkpoint_interval",
+                type=int,
+                required=False,
+                default=60,
+                description="Checkpoint creation interval (seconds)",
+            ),
+            "sync_batch_size": NodeParameter(
+                name="sync_batch_size",
+                type=int,
+                required=False,
+                default=1000,
+                description="Records per sync batch",
+            ),
+            "bandwidth_limit_mbps": NodeParameter(
+                name="bandwidth_limit_mbps",
+                type=float,
+                required=False,
+                description="Bandwidth limit in Mbps",
+            ),
+            "enable_compression": NodeParameter(
+                name="enable_compression",
+                type=bool,
+                required=False,
+                default=True,
+                description="Enable data compression",
+            ),
+        }
+
+    @property
+    def output_parameters(self) -> Dict[str, NodeParameter]:
+        """Define output parameters."""
+        return {
+            "status": NodeParameter(
+                name="status", type=str, description="Operation status"
+            ),
+            "plan": NodeParameter(
+                name="plan",
+                type=dict,
+                required=False,
+                description="Migration plan details",
+            ),
+            "progress": NodeParameter(
+                name="progress",
+                type=dict,
+                required=False,
+                description="Migration progress information",
+            ),
+            "result": NodeParameter(
+                name="result", type=dict, required=False, description="Operation result"
+            ),
+            "migrations": NodeParameter(
+                name="migrations",
+                type=list,
+                required=False,
+                description="List of migrations",
+            ),
+            "metrics": NodeParameter(
+                name="metrics",
+                type=dict,
+                required=False,
+                description="Migration metrics",
+            ),
+            "migrator_active": NodeParameter(
+                name="migrator_active",
+                type=bool,
+                required=False,
+                description="Whether migrator service is active",
+            ),
+        }
+
+    def get_parameters(self) -> Dict[str, NodeParameter]:
+        """Get all node parameters for compatibility."""
+        return self.input_parameters
+
+    async def async_run(self, **kwargs) -> Dict[str, Any]:
+        """Execute migration operation."""
+        operation = kwargs["operation"]
+
+        try:
+            if operation == "plan_migration":
+                return await self._plan_migration(kwargs)
+            elif operation == "execute_migration":
+                return await self._execute_migration(kwargs)
+            elif operation == "get_progress":
+                return await self._get_progress(kwargs)
+            elif operation == "pause_migration":
+                return await self._pause_migration(kwargs)
+            elif operation == "resume_migration":
+                return await self._resume_migration(kwargs)
+            elif operation == "rollback_migration":
+                return await self._rollback_migration(kwargs)
+            elif operation == "get_active_migrations":
+                return await self._get_active_migrations()
+            elif operation == "get_history":
+                return await self._get_history()
+            elif operation == "get_metrics":
+                return await self._get_metrics()
+            elif operation == "start_migrator":
+                return await self._start_migrator()
+            elif operation == "stop_migrator":
+                return await self._stop_migrator()
+            else:
+                raise ValueError(f"Unknown operation: {operation}")
+
+        except Exception as e:
+            self.logger.error(f"Edge migration operation failed: {str(e)}")
+            return {"status": "error", "error": str(e)}
+
+    async def _plan_migration(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Plan a migration."""
+        # Parse strategy
+        strategy_name = kwargs.get("strategy", "live")
+        try:
+            strategy = MigrationStrategy(strategy_name)
+        except ValueError:
+            strategy = MigrationStrategy.LIVE
+
+        # Create plan
+        plan = await self.migrator.plan_migration(
+            source_edge=kwargs.get("source_edge", "unknown"),
+            target_edge=kwargs.get("target_edge", "unknown"),
+            workloads=kwargs.get("workloads", []),
+            strategy=strategy,
+            constraints=kwargs.get("constraints", {}),
+        )
+
+        # Set priority
+        plan.priority = kwargs.get("priority", 5)
+
+        return {
+            "status": "success",
+            "plan": plan.to_dict(),
+            "estimated_duration": self._estimate_duration(plan),
+        }
+
+    async def _execute_migration(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Execute a migration."""
+        migration_id = kwargs.get("migration_id")
+        if not migration_id:
+            raise ValueError("migration_id is required")
+
+        # Start execution asynchronously
+        result = await self.migrator.execute_migration(migration_id)
+
+        return {"status": "success", "result": result}
+
+    async def _get_progress(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Get migration progress."""
+        migration_id = kwargs.get("migration_id")
+        if not migration_id:
+            raise ValueError("migration_id is required")
+
+        progress = await self.migrator.get_progress(migration_id)
+
+        return {"status": "success", "progress": progress.to_dict()}
+
+    async def _pause_migration(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Pause a migration."""
+        migration_id = kwargs.get("migration_id")
+        if not migration_id:
+            raise ValueError("migration_id is required")
+
+        result = await self.migrator.pause_migration(migration_id)
+
+        return {"status": "success", "result": result}
+
+    async def _resume_migration(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Resume a migration."""
+        migration_id = kwargs.get("migration_id")
+        if not migration_id:
+            raise ValueError("migration_id is required")
+
+        result = await self.migrator.resume_migration(migration_id)
+
+        return {"status": "success", "result": result}
+
+    async def _rollback_migration(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Rollback a migration."""
+        migration_id = kwargs.get("migration_id")
+        if not migration_id:
+            raise ValueError("migration_id is required")
+
+        checkpoint_id = kwargs.get("checkpoint_id")
+
+        result = await self.migrator.rollback_migration(migration_id, checkpoint_id)
+
+        return {"status": "success", "result": result}
+
+    async def _get_active_migrations(self) -> Dict[str, Any]:
+        """Get active migrations."""
+        migrations = self.migrator.get_active_migrations()
+
+        return {
+            "status": "success",
+            "migrations": [m.to_dict() for m in migrations],
+            "count": len(migrations),
+        }
+
+    async def _get_history(self) -> Dict[str, Any]:
+        """Get migration history."""
+        history = self.migrator.get_migration_history()
+
+        return {"status": "success", "migrations": history, "count": len(history)}
+
+    async def _get_metrics(self) -> Dict[str, Any]:
+        """Get migration metrics."""
+        metrics = self.migrator.get_migration_metrics()
+
+        return {"status": "success", "metrics": metrics}
+
+    async def _start_migrator(self) -> Dict[str, Any]:
+        """Start migrator service."""
+        if not self._migrator_started:
+            await self.migrator.start()
+            self._migrator_started = True
+
+        return {"status": "success", "migrator_active": True}
+
+    async def _stop_migrator(self) -> Dict[str, Any]:
+        """Stop migrator service."""
+        if self._migrator_started:
+            await self.migrator.stop()
+            self._migrator_started = False
+
+        return {"status": "success", "migrator_active": False}
+
+    def _estimate_duration(self, plan: MigrationPlan) -> float:
+        """Estimate migration duration in seconds."""
+        # Simple estimation based on data size and strategy
+        base_time = plan.data_size_estimate / (100 * 1024 * 1024)  # 100MB/s baseline
+
+        strategy_multipliers = {
+            MigrationStrategy.LIVE: 1.5,  # Extra time for live sync
+            MigrationStrategy.STAGED: 1.2,  # Controlled phases
+            MigrationStrategy.BULK: 1.0,  # Fastest
+            MigrationStrategy.INCREMENTAL: 2.0,  # Multiple passes
+            MigrationStrategy.EMERGENCY: 0.8,  # Fast but risky
+        }
+
+        multiplier = strategy_multipliers.get(plan.strategy, 1.0)
+
+        # Add overhead for validation and cleanup
+        overhead = 60 * len(plan.workloads)  # 1 minute per workload
+
+        return base_time * multiplier + overhead
+
+    async def cleanup(self):
+        """Clean up resources."""
+        if self._migrator_started:
+            await self.migrator.stop()
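For context, here is a minimal end-to-end usage sketch of the new EdgeMigrationNode, assembled from the docstring above. It assumes kailash 0.8.5 is installed and that the edge names and workloads ("edge-west-1", "edge-east-1", "api-service", "cache-layer") are illustrative placeholders rather than real endpoints; the returned dictionaries follow the output_parameters defined in the diff (status, plan, progress, result).

```python
# Hypothetical flow: start the migrator service, plan a live migration,
# execute it, poll progress, then shut the service down. Names are
# placeholders taken from the docstring example.
import asyncio

from kailash.nodes.edge.edge_migration_node import EdgeMigrationNode


async def main():
    node = EdgeMigrationNode(checkpoint_interval=30, enable_compression=True)

    await node.execute_async(operation="start_migrator")

    # plan_migration returns {"status": ..., "plan": {...}, "estimated_duration": ...}
    planned = await node.execute_async(
        operation="plan_migration",
        source_edge="edge-west-1",
        target_edge="edge-east-1",
        workloads=["api-service", "cache-layer"],
        strategy="live",
        priority=7,
    )
    migration_id = planned["plan"]["migration_id"]

    await node.execute_async(operation="execute_migration", migration_id=migration_id)

    progress = await node.execute_async(
        operation="get_progress", migration_id=migration_id
    )
    print(progress["progress"])

    await node.execute_async(operation="stop_migrator")


asyncio.run(main())
```

Note that plan_migration also returns estimated_duration, which the node computes as data size at a 100 MB/s baseline, scaled by a per-strategy multiplier (live 1.5, staged 1.2, bulk 1.0, incremental 2.0, emergency 0.8), plus 60 seconds of overhead per workload.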