kailash 0.8.5__py3-none-any.whl → 0.8.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. kailash/__init__.py +5 -5
  2. kailash/channels/__init__.py +2 -1
  3. kailash/channels/mcp_channel.py +23 -4
  4. kailash/cli/validate_imports.py +202 -0
  5. kailash/core/resilience/bulkhead.py +15 -5
  6. kailash/core/resilience/circuit_breaker.py +4 -1
  7. kailash/core/resilience/health_monitor.py +312 -84
  8. kailash/edge/migration/edge_migration_service.py +384 -0
  9. kailash/mcp_server/protocol.py +26 -0
  10. kailash/mcp_server/server.py +1081 -8
  11. kailash/mcp_server/subscriptions.py +1560 -0
  12. kailash/mcp_server/transports.py +305 -0
  13. kailash/middleware/gateway/event_store.py +1 -0
  14. kailash/nodes/base.py +77 -1
  15. kailash/nodes/code/python.py +44 -3
  16. kailash/nodes/data/async_sql.py +42 -20
  17. kailash/nodes/edge/edge_migration_node.py +16 -12
  18. kailash/nodes/governance.py +410 -0
  19. kailash/nodes/rag/registry.py +1 -1
  20. kailash/nodes/transaction/distributed_transaction_manager.py +48 -1
  21. kailash/nodes/transaction/saga_state_storage.py +2 -1
  22. kailash/nodes/validation.py +8 -8
  23. kailash/runtime/local.py +30 -0
  24. kailash/runtime/validation/__init__.py +7 -15
  25. kailash/runtime/validation/import_validator.py +446 -0
  26. kailash/runtime/validation/suggestion_engine.py +5 -5
  27. kailash/utils/data_paths.py +74 -0
  28. kailash/workflow/builder.py +183 -4
  29. kailash/workflow/mermaid_visualizer.py +3 -1
  30. kailash/workflow/templates.py +6 -6
  31. kailash/workflow/validation.py +134 -3
  32. {kailash-0.8.5.dist-info → kailash-0.8.7.dist-info}/METADATA +20 -17
  33. {kailash-0.8.5.dist-info → kailash-0.8.7.dist-info}/RECORD +37 -31
  34. {kailash-0.8.5.dist-info → kailash-0.8.7.dist-info}/WHEEL +0 -0
  35. {kailash-0.8.5.dist-info → kailash-0.8.7.dist-info}/entry_points.txt +0 -0
  36. {kailash-0.8.5.dist-info → kailash-0.8.7.dist-info}/licenses/LICENSE +0 -0
  37. {kailash-0.8.5.dist-info → kailash-0.8.7.dist-info}/top_level.txt +0 -0
kailash/edge/migration/edge_migration_service.py
@@ -0,0 +1,384 @@
+"""EdgeMigrationService singleton for shared migration state management.
+
+This module provides a singleton EdgeMigrationService class that manages
+shared migration state across all EdgeMigrationNode instances, enabling
+proper cross-node migration workflows.
+"""
+
+import asyncio
+import hashlib
+import logging
+import threading
+import time
+from collections import defaultdict
+from datetime import datetime, timedelta
+from typing import Any, Dict, List, Optional
+
+from kailash.edge.migration.edge_migrator import (
+    EdgeMigrator,
+    MigrationPhase,
+    MigrationPlan,
+    MigrationProgress,
+    MigrationStrategy,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class EdgeMigrationService:
+    """Singleton service managing shared migration state across EdgeMigrationNode instances.
+
+    This service provides centralized management of migration plans, progress tracking,
+    and shared state coordination. It follows the singleton pattern to ensure
+    state consistency across multiple EdgeMigrationNode instances.
+    """
+
+    _instance: Optional["EdgeMigrationService"] = None
+    _lock = threading.Lock()
+    _state_lock = threading.RLock()  # For state access synchronization
+
+    def __new__(cls, config: Optional[Dict[str, Any]] = None):
+        """Create or return the singleton instance.
+
+        Args:
+            config: Migration service configuration
+
+        Returns:
+            The singleton EdgeMigrationService instance
+        """
+        if cls._instance is None:
+            with cls._lock:
+                # Double-check locking pattern
+                if cls._instance is None:
+                    cls._instance = super().__new__(cls)
+                    cls._instance._initialized = False
+        return cls._instance
+
+    def __init__(self, config: Optional[Dict[str, Any]] = None):
+        """Initialize the migration service.
+
+        Args:
+            config: Migration service configuration
+        """
+        # Only initialize once
+        if self._initialized:
+            return
+
+        with self._lock:
+            if self._initialized:
+                return
+
+            logger.info("Initializing EdgeMigrationService singleton")
+
+            # Merge with defaults
+            self._config = self._merge_with_defaults(config or {})
+
+            # Shared migration state (moved from EdgeMigrator instances)
+            self._active_migrations: Dict[str, MigrationPlan] = {}
+            self._migration_progress: Dict[str, MigrationProgress] = {}
+            self._checkpoints: Dict[str, List] = defaultdict(list)
+            self._completed_migrations: List[str] = []
+            self._failed_migrations: Dict[str, str] = {}  # migration_id -> error
+
+            # Migration ID reservation tracking
+            self._reserved_ids: set = set()
+
+            # Migrator instances per configuration
+            self._migrators: Dict[str, EdgeMigrator] = {}
+
+            # Metrics tracking
+            self._metrics = {
+                "total_migrations": 0,
+                "active_migrations": 0,
+                "completed_migrations": 0,
+                "failed_migrations": 0,
+                "success_rate": 100.0,
+            }
+
+            # Service state
+            self._start_time = time.time()
+            self._initialized = True
+
+    def _merge_with_defaults(self, config: Dict[str, Any]) -> Dict[str, Any]:
+        """Merge user config with default values.
+
+        Args:
+            config: User-provided configuration
+
+        Returns:
+            Merged configuration
+        """
+        defaults = {
+            "checkpoint_interval": 60,
+            "sync_batch_size": 1000,
+            "bandwidth_limit_mbps": None,
+            "enable_compression": True,
+            "max_concurrent_migrations": 5,
+            "cleanup_completed_after": 3600,  # 1 hour in seconds
+        }
+
+        # Deep merge
+        merged = defaults.copy()
+        merged.update(config)
+        return merged
+
+    def get_configuration(self) -> Dict[str, Any]:
+        """Get current configuration.
+
+        Returns:
+            Current service configuration
+        """
+        return self._config.copy()
+
+    def store_migration_plan(self, plan: MigrationPlan) -> None:
+        """Store a migration plan in shared state.
+
+        Args:
+            plan: Migration plan to store
+        """
+        with self._state_lock:
+            self._active_migrations[plan.migration_id] = plan
+
+            # Initialize progress tracking
+            if plan.migration_id not in self._migration_progress:
+                self._migration_progress[plan.migration_id] = MigrationProgress(
+                    migration_id=plan.migration_id,
+                    phase=MigrationPhase.PLANNING,
+                    progress_percent=0.0,
+                    data_transferred=0,
+                    workloads_migrated=[],
+                    start_time=datetime.now(),
+                )
+
+            # Update metrics
+            self._metrics["total_migrations"] = len(self._active_migrations) + len(
+                self._completed_migrations
+            )
+            self._metrics["active_migrations"] = len(self._active_migrations)
+
+    def get_migration_plan(self, migration_id: str) -> Optional[MigrationPlan]:
+        """Get a migration plan from shared state.
+
+        Args:
+            migration_id: ID of migration to retrieve
+
+        Returns:
+            Migration plan if found, None otherwise
+        """
+        with self._state_lock:
+            return self._active_migrations.get(migration_id)
+
+    def get_migration_progress(self, migration_id: str) -> Optional[MigrationProgress]:
+        """Get migration progress from shared state.
+
+        Args:
+            migration_id: ID of migration to get progress for
+
+        Returns:
+            Migration progress if found, None otherwise
+        """
+        with self._state_lock:
+            return self._migration_progress.get(migration_id)
+
+    def update_migration_progress(self, progress: MigrationProgress) -> None:
+        """Update migration progress in shared state.
+
+        Args:
+            progress: Updated migration progress
+        """
+        with self._state_lock:
+            self._migration_progress[progress.migration_id] = progress
+
+    def reserve_migration_id(
+        self, source_edge: str, target_edge: str, workloads: List[str]
+    ) -> str:
+        """Reserve a unique migration ID to prevent collisions.
+
+        Args:
+            source_edge: Source edge node
+            target_edge: Target edge node
+            workloads: List of workloads
+
+        Returns:
+            Reserved migration ID
+
+        Raises:
+            ValueError: If migration ID collision detected
+        """
+        with self._state_lock:
+            # Generate ID based on parameters and timestamp
+            timestamp = str(int(time.time() * 1000))  # millisecond precision
+            content = (
+                f"{source_edge}-{target_edge}-{'-'.join(sorted(workloads))}-{timestamp}"
+            )
+            migration_id = hashlib.md5(content.encode()).hexdigest()[:12]
+
+            # Check for collision
+            if (
+                migration_id in self._reserved_ids
+                or migration_id in self._active_migrations
+            ):
+                raise ValueError(f"Migration ID collision detected: {migration_id}")
+
+            # Reserve the ID
+            self._reserved_ids.add(migration_id)
+            return f"migration-{migration_id}"
+
+    def mark_migration_completed(self, migration_id: str) -> None:
+        """Mark a migration as completed.
+
+        Args:
+            migration_id: ID of completed migration
+        """
+        with self._state_lock:
+            if migration_id in self._active_migrations:
+                del self._active_migrations[migration_id]
+                self._completed_migrations.append(migration_id)
+
+            # Update metrics
+            self._metrics["active_migrations"] = len(self._active_migrations)
+            self._metrics["completed_migrations"] = len(self._completed_migrations)
+            self._update_success_rate()
+
+    def mark_migration_failed(self, migration_id: str, error: str) -> None:
+        """Mark a migration as failed.
+
+        Args:
+            migration_id: ID of failed migration
+            error: Error message
+        """
+        with self._state_lock:
+            if migration_id in self._active_migrations:
+                del self._active_migrations[migration_id]
+                self._failed_migrations[migration_id] = error
+
+            # Update metrics
+            self._metrics["active_migrations"] = len(self._active_migrations)
+            self._metrics["failed_migrations"] = len(self._failed_migrations)
+            self._update_success_rate()
+
+    def _update_success_rate(self) -> None:
+        """Update success rate metric."""
+        total_completed = len(self._completed_migrations) + len(self._failed_migrations)
+        if total_completed > 0:
+            success_rate = (len(self._completed_migrations) / total_completed) * 100
+            self._metrics["success_rate"] = round(success_rate, 2)
+        else:
+            self._metrics["success_rate"] = 100.0
+
+    def get_active_migration_count(self) -> int:
+        """Get count of active migrations.
+
+        Returns:
+            Number of active migrations
+        """
+        with self._state_lock:
+            return len(self._active_migrations)
+
+    def get_completed_migration_count(self) -> int:
+        """Get count of completed migrations.
+
+        Returns:
+            Number of completed migrations
+        """
+        with self._state_lock:
+            return len(self._completed_migrations)
+
+    def cleanup_old_migrations(self) -> None:
+        """Clean up old completed migrations based on configuration."""
+        with self._state_lock:
+            cleanup_threshold = self._config["cleanup_completed_after"]
+            current_time = time.time()
+
+            # For now, just limit the number of completed migrations kept
+            # In a real implementation, we'd track completion timestamps
+            max_completed = 100
+            if len(self._completed_migrations) > max_completed:
+                # Keep only the most recent ones
+                self._completed_migrations = self._completed_migrations[-max_completed:]
+                self._metrics["completed_migrations"] = len(self._completed_migrations)
+
+    def get_migration_metrics(self) -> Dict[str, Any]:
+        """Get migration metrics.
+
+        Returns:
+            Dictionary of migration metrics
+        """
+        with self._state_lock:
+            return self._metrics.copy()
+
+    async def plan_migration_async(
+        self,
+        source_edge: str,
+        target_edge: str,
+        workloads: List[str],
+        strategy: MigrationStrategy = MigrationStrategy.LIVE,
+        constraints: Optional[Dict[str, Any]] = None,
+    ) -> MigrationPlan:
+        """Plan a migration asynchronously.
+
+        Args:
+            source_edge: Source edge node
+            target_edge: Target edge node
+            workloads: List of workloads to migrate
+            strategy: Migration strategy
+            constraints: Optional constraints
+
+        Returns:
+            Created migration plan
+        """
+        # Reserve unique migration ID
+        migration_id = self.reserve_migration_id(source_edge, target_edge, workloads)
+
+        # Create migration plan
+        plan = MigrationPlan(
+            migration_id=migration_id,
+            source_edge=source_edge,
+            target_edge=target_edge,
+            strategy=strategy,
+            workloads=workloads,
+            data_size_estimate=len(workloads) * 1024 * 1024,  # Rough estimate
+            constraints=constraints or {},
+        )
+
+        # Store in shared state
+        self.store_migration_plan(plan)
+
+        return plan
+
+    def get_migrator_for_node(
+        self, node_id: str, node_config: Optional[Dict[str, Any]] = None
+    ) -> EdgeMigrator:
+        """Get or create a migrator instance for a specific node.
+
+        Args:
+            node_id: Unique identifier for the node
+            node_config: Node-specific configuration
+
+        Returns:
+            EdgeMigrator instance configured for the node
+        """
+        with self._state_lock:
+            if node_id not in self._migrators:
+                # Merge node config with service defaults
+                migrator_config = self._config.copy()
+                if node_config:
+                    migrator_config.update(node_config)
+
+                # Create migrator with shared state access
+                migrator = EdgeMigrator(
+                    checkpoint_interval=migrator_config.get("checkpoint_interval", 60),
+                    sync_batch_size=migrator_config.get("sync_batch_size", 1000),
+                    bandwidth_limit_mbps=migrator_config.get("bandwidth_limit_mbps"),
+                    enable_compression=migrator_config.get("enable_compression", True),
+                )
+
+                # Override migrator's state dictionaries to use shared state
+                migrator.active_migrations = self._active_migrations
+                migrator.migration_progress = self._migration_progress
+                migrator.checkpoints = self._checkpoints
+                migrator.completed_migrations = self._completed_migrations
+
+                self._migrators[node_id] = migrator
+
+            return self._migrators[node_id]
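
The new EdgeMigrationService centralizes plan storage, progress tracking, and collision-checked migration ID reservation so that every EdgeMigrationNode in a process observes the same state. A minimal sketch of how that singleton might be exercised follows; the import paths and method names come from the file above, while the edge names, workload name, config values, and call sequence are illustrative assumptions, not documented package usage.

import asyncio

from kailash.edge.migration.edge_migration_service import EdgeMigrationService
from kailash.edge.migration.edge_migrator import MigrationStrategy


async def main():
    # Both constructor calls return the same singleton, so state is shared
    # process-wide across nodes (hypothetical config value shown).
    service_a = EdgeMigrationService({"max_concurrent_migrations": 2})
    service_b = EdgeMigrationService()
    assert service_a is service_b

    # Plan a migration: the service reserves a collision-checked ID and
    # stores the plan plus initial progress in the shared state.
    plan = await service_a.plan_migration_async(
        source_edge="edge-west-1",      # hypothetical edge node names
        target_edge="edge-east-2",
        workloads=["inference-cache"],  # hypothetical workload
        strategy=MigrationStrategy.LIVE,
    )

    # A different handle (e.g. another EdgeMigrationNode) sees the same plan.
    progress = service_b.get_migration_progress(plan.migration_id)
    print(plan.migration_id, progress.phase if progress else None)

    # Completing the migration moves it out of the active set and refreshes
    # the shared metrics (counts and success rate).
    service_a.mark_migration_completed(plan.migration_id)
    print(service_a.get_migration_metrics())


asyncio.run(main())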
kailash/mcp_server/protocol.py
@@ -58,6 +58,7 @@ import time
 import uuid
 from abc import ABC, abstractmethod
 from dataclasses import asdict, dataclass, field
+from datetime import datetime
 from enum import Enum
 from typing import Any, AsyncGenerator, Callable, Dict, List, Optional, Union
 
@@ -322,6 +323,31 @@ class ResourceTemplate:
         return result
 
 
+class ResourceChangeType(Enum):
+    """Types of resource changes."""
+
+    CREATED = "created"
+    UPDATED = "updated"
+    DELETED = "deleted"
+
+
+@dataclass
+class ResourceChange:
+    """Represents a resource change event."""
+
+    type: ResourceChangeType
+    uri: str
+    timestamp: datetime
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary for serialization."""
+        return {
+            "type": self.type.value,
+            "uri": self.uri,
+            "timestamp": self.timestamp.isoformat(),
+        }
+
+
 @dataclass
 class ToolResult:
     """Enhanced tool result with structured content."""