kailash 0.8.4__py3-none-any.whl → 0.8.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79) hide show
  1. kailash/__init__.py +1 -7
  2. kailash/cli/__init__.py +11 -1
  3. kailash/cli/validation_audit.py +570 -0
  4. kailash/core/actors/supervisor.py +1 -1
  5. kailash/core/resilience/circuit_breaker.py +71 -1
  6. kailash/core/resilience/health_monitor.py +172 -0
  7. kailash/edge/compliance.py +33 -0
  8. kailash/edge/consistency.py +609 -0
  9. kailash/edge/coordination/__init__.py +30 -0
  10. kailash/edge/coordination/global_ordering.py +355 -0
  11. kailash/edge/coordination/leader_election.py +217 -0
  12. kailash/edge/coordination/partition_detector.py +296 -0
  13. kailash/edge/coordination/raft.py +485 -0
  14. kailash/edge/discovery.py +63 -1
  15. kailash/edge/migration/__init__.py +19 -0
  16. kailash/edge/migration/edge_migrator.py +832 -0
  17. kailash/edge/monitoring/__init__.py +21 -0
  18. kailash/edge/monitoring/edge_monitor.py +736 -0
  19. kailash/edge/prediction/__init__.py +10 -0
  20. kailash/edge/prediction/predictive_warmer.py +591 -0
  21. kailash/edge/resource/__init__.py +102 -0
  22. kailash/edge/resource/cloud_integration.py +796 -0
  23. kailash/edge/resource/cost_optimizer.py +949 -0
  24. kailash/edge/resource/docker_integration.py +919 -0
  25. kailash/edge/resource/kubernetes_integration.py +893 -0
  26. kailash/edge/resource/platform_integration.py +913 -0
  27. kailash/edge/resource/predictive_scaler.py +959 -0
  28. kailash/edge/resource/resource_analyzer.py +824 -0
  29. kailash/edge/resource/resource_pools.py +610 -0
  30. kailash/integrations/dataflow_edge.py +261 -0
  31. kailash/mcp_server/registry_integration.py +1 -1
  32. kailash/monitoring/__init__.py +18 -0
  33. kailash/monitoring/alerts.py +646 -0
  34. kailash/monitoring/metrics.py +677 -0
  35. kailash/nodes/__init__.py +2 -0
  36. kailash/nodes/ai/semantic_memory.py +2 -2
  37. kailash/nodes/base.py +545 -0
  38. kailash/nodes/edge/__init__.py +36 -0
  39. kailash/nodes/edge/base.py +240 -0
  40. kailash/nodes/edge/cloud_node.py +710 -0
  41. kailash/nodes/edge/coordination.py +239 -0
  42. kailash/nodes/edge/docker_node.py +825 -0
  43. kailash/nodes/edge/edge_data.py +582 -0
  44. kailash/nodes/edge/edge_migration_node.py +392 -0
  45. kailash/nodes/edge/edge_monitoring_node.py +421 -0
  46. kailash/nodes/edge/edge_state.py +673 -0
  47. kailash/nodes/edge/edge_warming_node.py +393 -0
  48. kailash/nodes/edge/kubernetes_node.py +652 -0
  49. kailash/nodes/edge/platform_node.py +766 -0
  50. kailash/nodes/edge/resource_analyzer_node.py +378 -0
  51. kailash/nodes/edge/resource_optimizer_node.py +501 -0
  52. kailash/nodes/edge/resource_scaler_node.py +397 -0
  53. kailash/nodes/ports.py +676 -0
  54. kailash/runtime/local.py +344 -1
  55. kailash/runtime/validation/__init__.py +20 -0
  56. kailash/runtime/validation/connection_context.py +119 -0
  57. kailash/runtime/validation/enhanced_error_formatter.py +202 -0
  58. kailash/runtime/validation/error_categorizer.py +164 -0
  59. kailash/runtime/validation/metrics.py +380 -0
  60. kailash/runtime/validation/performance.py +615 -0
  61. kailash/runtime/validation/suggestion_engine.py +212 -0
  62. kailash/testing/fixtures.py +2 -2
  63. kailash/workflow/builder.py +230 -4
  64. kailash/workflow/contracts.py +418 -0
  65. kailash/workflow/edge_infrastructure.py +369 -0
  66. kailash/workflow/migration.py +3 -3
  67. kailash/workflow/type_inference.py +669 -0
  68. {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/METADATA +43 -27
  69. {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/RECORD +73 -27
  70. kailash/nexus/__init__.py +0 -21
  71. kailash/nexus/cli/__init__.py +0 -5
  72. kailash/nexus/cli/__main__.py +0 -6
  73. kailash/nexus/cli/main.py +0 -176
  74. kailash/nexus/factory.py +0 -413
  75. kailash/nexus/gateway.py +0 -545
  76. {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/WHEEL +0 -0
  77. {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/entry_points.txt +0 -0
  78. {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/licenses/LICENSE +0 -0
  79. {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,582 @@
1
+ """Edge data node for distributed data management with consistency guarantees."""
2
+
3
+ import asyncio
4
+ import hashlib
5
+ import json
6
+ from datetime import UTC, datetime
7
+ from enum import Enum
8
+ from typing import Any, Dict, List, Optional, Set
9
+
10
+ from kailash.edge.location import EdgeLocation
11
+ from kailash.nodes.base import NodeParameter, register_node
12
+
13
+ from .base import EdgeNode
14
+
15
+
16
class ConsistencyModel(Enum):
    """Data consistency models for edge operations."""

    # Two-phase commit; every replica must acknowledge before success.
    STRONG = "strong"
    # Fire-and-forget asynchronous replication.
    EVENTUAL = "eventual"
    # Updates propagate with causal-dependency metadata attached.
    CAUSAL = "causal"
    # Reads tolerate staleness up to a configurable threshold.
    BOUNDED_STALENESS = "bounded_staleness"
23
+
24
+
25
+ class ReplicationStatus:
26
+ """Track replication status across edges."""
27
+
28
+ def __init__(self):
29
+ self.pending: Set[str] = set()
30
+ self.completed: Set[str] = set()
31
+ self.failed: Set[str] = set()
32
+ self.start_time = datetime.now(UTC)
33
+
34
+ @property
35
+ def is_complete(self) -> bool:
36
+ return len(self.pending) == 0
37
+
38
+ @property
39
+ def success_rate(self) -> float:
40
+ total = len(self.completed) + len(self.failed)
41
+ return len(self.completed) / total if total > 0 else 0.0
42
+
43
+
44
@register_node()
class EdgeDataNode(EdgeNode):
    """Distributed data node with multi-edge replication and consistency.

    Features:
    - Multiple consistency models
    - Automatic replication across edges
    - Conflict resolution
    - Compliance-aware data placement
    """

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Get node parameters.

        All parameters are optional; ``action`` selects the operation and
        the remaining keys are interpreted per-action by ``async_run``.
        """
        return {
            "action": NodeParameter(
                name="action",
                type=str,
                default="read",
                required=False,
                description="Operation to perform (read|write|replicate|sync)",
            ),
            "key": NodeParameter(
                name="key",
                type=str,
                required=False,
                description="Data key for operations",
            ),
            "data": NodeParameter(
                name="data", type=dict, required=False, description="Data to write"
            ),
            "target_edges": NodeParameter(
                name="target_edges",
                type=list,
                required=False,
                description="Target edges for replication",
            ),
            "keys": NodeParameter(
                name="keys", type=list, required=False, description="Keys to sync"
            ),
            "consistency": NodeParameter(
                name="consistency",
                type=str,
                default="eventual",
                required=False,
                description="Consistency model (strong|eventual|causal|bounded_staleness)",
            ),
            "replication_factor": NodeParameter(
                name="replication_factor",
                type=int,
                default=3,
                required=False,
                description="Number of edge replicas to maintain",
            ),
            "staleness_threshold_ms": NodeParameter(
                name="staleness_threshold_ms",
                type=int,
                default=5000,
                required=False,
                description="Max staleness for bounded consistency (ms)",
            ),
            "conflict_resolution": NodeParameter(
                name="conflict_resolution",
                type=str,
                default="last_write_wins",
                required=False,
                description="Conflict resolution strategy",
            ),
        }

    def __init__(self, **config):
        """Initialize edge data node."""
        super().__init__(**config)

        # In-memory per-edge key/value store (simulated backend):
        # edge name -> key -> {"data", "version", "timestamp", "edge"}.
        self._edge_data: Dict[str, Dict[str, Any]] = {}
        # edge name -> key -> latest integer version stored on that edge.
        self._data_versions: Dict[str, Dict[str, int]] = {}
        # "key:version" -> background replication task (see _handle_write);
        # retained so cleanup() can cancel in-flight replication.
        self._replication_tasks: Dict[str, asyncio.Task] = {}
121
+
122
+ async def async_run(self, **kwargs) -> Dict[str, Any]:
123
+ """Execute edge data operation."""
124
+ # Get action from kwargs first, then from config
125
+ action = kwargs.get("action") or self.config.get("action", "read")
126
+
127
+ if action == "write":
128
+ return await self._handle_write(kwargs)
129
+ elif action == "read":
130
+ return await self._handle_read(kwargs)
131
+ elif action == "replicate":
132
+ return await self._handle_replicate(kwargs)
133
+ elif action == "sync":
134
+ return await self._handle_sync(kwargs)
135
+ else:
136
+ raise ValueError(f"Unknown action: {action}")
137
+
138
    async def _handle_write(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """Handle write operation with consistency guarantees.

        Writes locally first, then replicates according to the requested
        consistency model: STRONG replicates synchronously (2PC) before
        returning; all other models replicate in a background task, so the
        returned replication counts are a snapshot that may still change.

        Raises:
            ValueError: If 'key'/'data' is missing, or the consistency
                string is not a valid ConsistencyModel value.
        """
        key = params.get("key")
        data = params.get("data")
        # Raises ValueError for unrecognised consistency strings.
        consistency = ConsistencyModel(params.get("consistency", "eventual"))

        if not key or data is None:
            raise ValueError("Write requires 'key' and 'data'")

        # Ensure compliance for data placement
        if not await self.ensure_compliance({"data": data}):
            return {"success": False, "error": "No compliant edge available for data"}

        # Version is global across edges: one greater than any version seen.
        version = self._get_next_version(key)

        # Store locally first
        edge_name = self.current_edge.name
        if edge_name not in self._edge_data:
            self._edge_data[edge_name] = {}
            self._data_versions[edge_name] = {}

        self._edge_data[edge_name][key] = {
            "data": data,
            "version": version,
            "timestamp": datetime.now(UTC).isoformat(),
            "edge": edge_name,
        }
        self._data_versions[edge_name][key] = version

        # Handle consistency model
        replication_status = ReplicationStatus()

        if consistency == ConsistencyModel.STRONG:
            # Synchronous replication to all replicas (raises on 2PC failure;
            # the local write above is NOT rolled back in that case).
            await self._replicate_strong(key, data, version, replication_status)
        else:
            # Async replication for other models; the task is registered so
            # cleanup() can cancel it if the node shuts down first.
            task = asyncio.create_task(
                self._replicate_async(
                    key, data, version, consistency, replication_status
                )
            )
            self._replication_tasks[f"{key}:{version}"] = task

        return {
            "success": True,
            "key": key,
            "version": version,
            "edge": edge_name,
            "consistency": consistency.value,
            # NOTE: for non-strong models these counts are taken before the
            # background replication has finished.
            "replication_status": {
                "pending": len(replication_status.pending),
                "completed": len(replication_status.completed),
                "failed": len(replication_status.failed),
            },
        }
195
+
196
    async def _handle_read(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """Handle read operation with consistency awareness.

        Reads from the nearest edge holding the key. STRONG triggers a
        version check first; BOUNDED_STALENESS re-fetches from the primary
        when the stored copy is older than 'staleness_threshold_ms'.

        Raises:
            ValueError: If 'key' is missing or the consistency string is
                not a valid ConsistencyModel value.
        """
        key = params.get("key")
        consistency = ConsistencyModel(params.get("consistency", "eventual"))

        if not key:
            raise ValueError("Read requires 'key'")

        # For strong consistency, ensure we have latest version
        if consistency == ConsistencyModel.STRONG:
            await self._ensure_latest_version(key)

        # Find edge with data (local edge preferred, then lowest latency)
        edge_with_data = await self._find_edge_with_data(key)
        if not edge_with_data:
            return {"success": False, "error": f"Key '{key}' not found"}

        # Get data from edge
        edge_name, data_entry = edge_with_data

        # Check staleness for bounded consistency
        if consistency == ConsistencyModel.BOUNDED_STALENESS:
            staleness_ms = self._calculate_staleness(data_entry)
            threshold = params.get("staleness_threshold_ms", 5000)

            if staleness_ms > threshold:
                # Try to get fresher data, then re-resolve which edge serves
                # it; falls back to the stale copy if nothing fresher appears.
                await self._refresh_from_primary(key)
                edge_with_data = await self._find_edge_with_data(key)
                if edge_with_data:
                    edge_name, data_entry = edge_with_data

        return {
            "success": True,
            "key": key,
            "data": data_entry["data"],
            "version": data_entry["version"],
            "timestamp": data_entry["timestamp"],
            "edge": edge_name,
            "latency_ms": self._get_edge_latency(edge_name),
        }
237
+
238
+ async def _handle_replicate(self, params: Dict[str, Any]) -> Dict[str, Any]:
239
+ """Handle manual replication request."""
240
+ key = params.get("key")
241
+ target_edges = params.get("target_edges", [])
242
+
243
+ if not key:
244
+ raise ValueError("Replicate requires 'key'")
245
+
246
+ # Find source data
247
+ edge_with_data = await self._find_edge_with_data(key)
248
+ if not edge_with_data:
249
+ return {"success": False, "error": f"Key '{key}' not found"}
250
+
251
+ source_edge, data_entry = edge_with_data
252
+
253
+ # Replicate to targets
254
+ replication_results = {}
255
+ for target in target_edges:
256
+ success = await self._replicate_to_edge(
257
+ target, key, data_entry["data"], data_entry["version"]
258
+ )
259
+ replication_results[target] = success
260
+
261
+ return {
262
+ "success": True,
263
+ "key": key,
264
+ "source_edge": source_edge,
265
+ "replication_results": replication_results,
266
+ }
267
+
268
    async def _handle_sync(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """Handle sync operation to ensure consistency.

        For each requested key, finds the winning version across all edges
        (last-write-wins) and pushes it to every edge holding an older copy.
        Edges that never stored the key are NOT given a copy here — sync
        only reconciles existing replicas.
        """
        keys = params.get("keys", [])

        sync_results = {}
        for key in keys:
            # Find all versions across edges
            versions = self._get_all_versions(key)

            if not versions:
                sync_results[key] = {"status": "not_found"}
                continue

            # Determine winning version
            winner_edge, winner_version = self._resolve_conflict(versions)

            # Edges holding an older copy need the winner pushed to them.
            edges_to_sync = [e for e, v in versions.items() if v < winner_version]

            if edges_to_sync:
                data_entry = self._edge_data.get(winner_edge, {}).get(key)
                if data_entry:
                    for edge in edges_to_sync:
                        await self._replicate_to_edge(
                            edge, key, data_entry["data"], winner_version
                        )

            sync_results[key] = {
                "status": "synced",
                "winner_edge": winner_edge,
                "winner_version": winner_version,
                "synced_edges": edges_to_sync,
            }

        return {"success": True, "sync_results": sync_results}
303
+
304
    async def _replicate_strong(
        self, key: str, data: Any, version: int, status: ReplicationStatus
    ):
        """Perform strong consistency replication (2PC).

        Phase 1 asks every target edge to prepare; phase 2 commits only when
        ALL targets prepared, otherwise aborts the prepared edges and raises.

        Raises:
            RuntimeError: If any target edge failed the prepare phase. The
                caller's local write is NOT rolled back in that case.
        """
        # Get target edges
        target_edges = await self._select_replication_targets()

        # Phase 1: Prepare
        prepare_tasks = []
        for edge in target_edges:
            status.pending.add(edge.name)
            prepare_tasks.append(self._prepare_replication(edge, key, data, version))

        prepare_results = await asyncio.gather(*prepare_tasks, return_exceptions=True)

        # Check if all prepared successfully
        prepared_edges = []
        for edge, result in zip(target_edges, prepare_results):
            if isinstance(result, Exception) or not result:
                status.failed.add(edge.name)
                status.pending.discard(edge.name)
            else:
                prepared_edges.append(edge)

        # Phase 2: Commit or Abort
        if len(prepared_edges) == len(target_edges):
            # All prepared - commit
            commit_tasks = []
            for edge in prepared_edges:
                commit_tasks.append(self._commit_replication(edge, key, version))

            # NOTE(review): commit exceptions are swallowed here and the edges
            # are still marked completed below — confirm this is intended.
            await asyncio.gather(*commit_tasks, return_exceptions=True)

            for edge in prepared_edges:
                status.completed.add(edge.name)
                status.pending.discard(edge.name)
        else:
            # Some failed - abort the edges that did prepare
            abort_tasks = []
            for edge in prepared_edges:
                abort_tasks.append(self._abort_replication(edge, key, version))

            await asyncio.gather(*abort_tasks, return_exceptions=True)

            raise RuntimeError(
                f"Strong consistency replication failed. "
                f"Only {len(prepared_edges)}/{len(target_edges)} edges prepared."
            )
352
+
353
+ async def _replicate_async(
354
+ self,
355
+ key: str,
356
+ data: Any,
357
+ version: int,
358
+ consistency: ConsistencyModel,
359
+ status: ReplicationStatus,
360
+ ):
361
+ """Perform async replication for eventual/causal/bounded consistency."""
362
+ target_edges = await self._select_replication_targets()
363
+
364
+ tasks = []
365
+ for edge in target_edges:
366
+ status.pending.add(edge.name)
367
+
368
+ if consistency == ConsistencyModel.CAUSAL:
369
+ # Add causal dependency tracking
370
+ task = self._replicate_causal(edge, key, data, version)
371
+ else:
372
+ # Simple async replication
373
+ task = self._replicate_to_edge(edge.name, key, data, version)
374
+
375
+ tasks.append(task)
376
+
377
+ results = await asyncio.gather(*tasks, return_exceptions=True)
378
+
379
+ for edge, result in zip(target_edges, results):
380
+ if isinstance(result, Exception) or not result:
381
+ status.failed.add(edge.name)
382
+ else:
383
+ status.completed.add(edge.name)
384
+ status.pending.discard(edge.name)
385
+
386
    async def _select_replication_targets(self) -> List[EdgeLocation]:
        """Select edges for replication based on strategy.

        Returns up to ``replication_factor - 1`` edges (the current edge
        counts as the first replica), excluding non-compliant edges and
        preferring low latency, then low network cost.
        """
        all_edges = self.edge_discovery.get_all_edges()

        # Remove current edge
        target_edges = [e for e in all_edges if e.name != self.current_edge.name]

        # Filter by compliance if needed: keep edges sharing at least one zone
        if self.compliance_zones:
            target_edges = [
                e
                for e in target_edges
                if any(zone in e.compliance_zones for zone in self.compliance_zones)
            ]

        # Sort by strategy and take replication_factor - 1 (current edge is 1)
        target_edges = sorted(
            target_edges,
            key=lambda e: (e.metrics.latency_p50_ms, e.metrics.network_cost_per_gb),
        )

        return target_edges[: self.config.get("replication_factor", 3) - 1]
408
+
409
+ async def _replicate_to_edge(
410
+ self, edge_name: str, key: str, data: Any, version: int
411
+ ) -> bool:
412
+ """Replicate data to specific edge."""
413
+ try:
414
+ # Simulate network replication
415
+ await asyncio.sleep(0.05) # 50ms replication latency
416
+
417
+ # Store in edge data
418
+ if edge_name not in self._edge_data:
419
+ self._edge_data[edge_name] = {}
420
+ self._data_versions[edge_name] = {}
421
+
422
+ self._edge_data[edge_name][key] = {
423
+ "data": data,
424
+ "version": version,
425
+ "timestamp": datetime.now(UTC).isoformat(),
426
+ "edge": edge_name,
427
+ }
428
+ self._data_versions[edge_name][key] = version
429
+
430
+ return True
431
+
432
+ except Exception as e:
433
+ self.logger.error(f"Replication to {edge_name} failed: {e}")
434
+ return False
435
+
436
+ async def _prepare_replication(
437
+ self, edge: EdgeLocation, key: str, data: Any, version: int
438
+ ) -> bool:
439
+ """Prepare phase of 2PC replication."""
440
+ # Simulate prepare phase
441
+ await asyncio.sleep(0.02)
442
+
443
+ # Check if edge can accept the write
444
+ if edge.metrics.storage_utilization > 0.95: # 95% full
445
+ return False
446
+
447
+ return True
448
+
449
+ async def _commit_replication(
450
+ self, edge: EdgeLocation, key: str, version: int
451
+ ) -> bool:
452
+ """Commit phase of 2PC replication."""
453
+ # Actually replicate the data
454
+ data_entry = self._edge_data.get(self.current_edge.name, {}).get(key)
455
+ if data_entry:
456
+ return await self._replicate_to_edge(
457
+ edge.name, key, data_entry["data"], version
458
+ )
459
+ return False
460
+
461
+ async def _abort_replication(
462
+ self, edge: EdgeLocation, key: str, version: int
463
+ ) -> bool:
464
+ """Abort phase of 2PC replication."""
465
+ # Clean up any prepared state
466
+ await asyncio.sleep(0.01)
467
+ return True
468
+
469
+ async def _replicate_causal(
470
+ self, edge: EdgeLocation, key: str, data: Any, version: int
471
+ ) -> bool:
472
+ """Replicate with causal consistency tracking."""
473
+ # Add causal dependency metadata
474
+ causal_data = {
475
+ "data": data,
476
+ "version": version,
477
+ "causal_deps": self._get_causal_dependencies(key),
478
+ "timestamp": datetime.now(UTC).isoformat(),
479
+ }
480
+
481
+ return await self._replicate_to_edge(edge.name, key, causal_data, version)
482
+
483
+ def _get_next_version(self, key: str) -> int:
484
+ """Get next version number for a key."""
485
+ max_version = 0
486
+
487
+ for edge_versions in self._data_versions.values():
488
+ if key in edge_versions:
489
+ max_version = max(max_version, edge_versions[key])
490
+
491
+ return max_version + 1
492
+
493
+ async def _find_edge_with_data(
494
+ self, key: str
495
+ ) -> Optional[tuple[str, Dict[str, Any]]]:
496
+ """Find edge that has the requested data."""
497
+ # First check current edge
498
+ if self.current_edge:
499
+ edge_name = self.current_edge.name
500
+ if edge_name in self._edge_data and key in self._edge_data[edge_name]:
501
+ return (edge_name, self._edge_data[edge_name][key])
502
+
503
+ # Check other edges by latency
504
+ edges_by_latency = sorted(
505
+ self.edge_discovery.get_all_edges(), key=lambda e: e.metrics.latency_p50_ms
506
+ )
507
+
508
+ for edge in edges_by_latency:
509
+ if edge.name in self._edge_data and key in self._edge_data[edge.name]:
510
+ return (edge.name, self._edge_data[edge.name][key])
511
+
512
+ return None
513
+
514
+ def _calculate_staleness(self, data_entry: Dict[str, Any]) -> float:
515
+ """Calculate data staleness in milliseconds."""
516
+ timestamp_str = data_entry["timestamp"]
517
+ # Handle both timezone-aware and naive timestamps
518
+ if timestamp_str.endswith("Z"):
519
+ timestamp_str = timestamp_str[:-1] + "+00:00"
520
+ timestamp = datetime.fromisoformat(timestamp_str)
521
+
522
+ # Make sure timestamp is timezone-aware
523
+ if timestamp.tzinfo is None:
524
+ timestamp = timestamp.replace(tzinfo=UTC)
525
+
526
+ staleness = datetime.now(UTC) - timestamp
527
+ return staleness.total_seconds() * 1000
528
+
529
+ def _get_edge_latency(self, edge_name: str) -> float:
530
+ """Get latency to specific edge."""
531
+ edge = self.edge_discovery.get_edge(edge_name)
532
+ return edge.metrics.latency_p50_ms if edge else 0.0
533
+
534
+ def _get_all_versions(self, key: str) -> Dict[str, int]:
535
+ """Get all versions of a key across edges."""
536
+ versions = {}
537
+
538
+ for edge_name, edge_versions in self._data_versions.items():
539
+ if key in edge_versions:
540
+ versions[edge_name] = edge_versions[key]
541
+
542
+ return versions
543
+
544
+ def _resolve_conflict(self, versions: Dict[str, int]) -> tuple[str, int]:
545
+ """Resolve version conflict using configured strategy."""
546
+ # For now, last write wins (highest version)
547
+ if not versions:
548
+ return (None, 0)
549
+
550
+ winner_edge = max(versions.items(), key=lambda x: x[1])
551
+ return winner_edge
552
+
553
    async def _ensure_latest_version(self, key: str):
        """Ensure we have the latest version for strong consistency.

        Stub: in production this would query peer edges for newer versions.
        """
        await asyncio.sleep(0.01)  # Simulate version check

    async def _refresh_from_primary(self, key: str):
        """Refresh data from primary edge for bounded staleness.

        Stub: in production this would fetch the entry from the primary.
        """
        await asyncio.sleep(0.02)  # Simulate refresh

    def _get_causal_dependencies(self, key: str) -> List[str]:
        """Get causal dependencies for a key.

        Stub: in production this would track actual write dependencies;
        currently always empty, so causal replication carries no deps.
        """
        return []
567
+
568
+ async def cleanup(self):
569
+ """Cleanup resources including replication tasks."""
570
+ # Cancel all replication tasks
571
+ tasks_to_cancel = []
572
+ for task_id, task in self._replication_tasks.items():
573
+ if not task.done():
574
+ task.cancel()
575
+ tasks_to_cancel.append(task)
576
+
577
+ # Wait for all cancelled tasks to complete
578
+ if tasks_to_cancel:
579
+ await asyncio.gather(*tasks_to_cancel, return_exceptions=True)
580
+
581
+ # Clear the task registry
582
+ self._replication_tasks.clear()