kailash 0.8.3__py3-none-any.whl → 0.8.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. kailash/__init__.py +1 -7
  2. kailash/cli/__init__.py +11 -1
  3. kailash/cli/validation_audit.py +570 -0
  4. kailash/core/actors/supervisor.py +1 -1
  5. kailash/core/resilience/circuit_breaker.py +71 -1
  6. kailash/core/resilience/health_monitor.py +172 -0
  7. kailash/edge/compliance.py +33 -0
  8. kailash/edge/consistency.py +609 -0
  9. kailash/edge/coordination/__init__.py +30 -0
  10. kailash/edge/coordination/global_ordering.py +355 -0
  11. kailash/edge/coordination/leader_election.py +217 -0
  12. kailash/edge/coordination/partition_detector.py +296 -0
  13. kailash/edge/coordination/raft.py +485 -0
  14. kailash/edge/discovery.py +63 -1
  15. kailash/edge/migration/__init__.py +19 -0
  16. kailash/edge/migration/edge_migrator.py +832 -0
  17. kailash/edge/monitoring/__init__.py +21 -0
  18. kailash/edge/monitoring/edge_monitor.py +736 -0
  19. kailash/edge/prediction/__init__.py +10 -0
  20. kailash/edge/prediction/predictive_warmer.py +591 -0
  21. kailash/edge/resource/__init__.py +102 -0
  22. kailash/edge/resource/cloud_integration.py +796 -0
  23. kailash/edge/resource/cost_optimizer.py +949 -0
  24. kailash/edge/resource/docker_integration.py +919 -0
  25. kailash/edge/resource/kubernetes_integration.py +893 -0
  26. kailash/edge/resource/platform_integration.py +913 -0
  27. kailash/edge/resource/predictive_scaler.py +959 -0
  28. kailash/edge/resource/resource_analyzer.py +824 -0
  29. kailash/edge/resource/resource_pools.py +610 -0
  30. kailash/integrations/dataflow_edge.py +261 -0
  31. kailash/mcp_server/registry_integration.py +1 -1
  32. kailash/monitoring/__init__.py +18 -0
  33. kailash/monitoring/alerts.py +646 -0
  34. kailash/monitoring/metrics.py +677 -0
  35. kailash/nodes/__init__.py +2 -0
  36. kailash/nodes/ai/__init__.py +17 -0
  37. kailash/nodes/ai/a2a.py +1914 -43
  38. kailash/nodes/ai/a2a_backup.py +1807 -0
  39. kailash/nodes/ai/hybrid_search.py +972 -0
  40. kailash/nodes/ai/semantic_memory.py +558 -0
  41. kailash/nodes/ai/streaming_analytics.py +947 -0
  42. kailash/nodes/base.py +545 -0
  43. kailash/nodes/edge/__init__.py +36 -0
  44. kailash/nodes/edge/base.py +240 -0
  45. kailash/nodes/edge/cloud_node.py +710 -0
  46. kailash/nodes/edge/coordination.py +239 -0
  47. kailash/nodes/edge/docker_node.py +825 -0
  48. kailash/nodes/edge/edge_data.py +582 -0
  49. kailash/nodes/edge/edge_migration_node.py +392 -0
  50. kailash/nodes/edge/edge_monitoring_node.py +421 -0
  51. kailash/nodes/edge/edge_state.py +673 -0
  52. kailash/nodes/edge/edge_warming_node.py +393 -0
  53. kailash/nodes/edge/kubernetes_node.py +652 -0
  54. kailash/nodes/edge/platform_node.py +766 -0
  55. kailash/nodes/edge/resource_analyzer_node.py +378 -0
  56. kailash/nodes/edge/resource_optimizer_node.py +501 -0
  57. kailash/nodes/edge/resource_scaler_node.py +397 -0
  58. kailash/nodes/ports.py +676 -0
  59. kailash/runtime/local.py +344 -1
  60. kailash/runtime/validation/__init__.py +20 -0
  61. kailash/runtime/validation/connection_context.py +119 -0
  62. kailash/runtime/validation/enhanced_error_formatter.py +202 -0
  63. kailash/runtime/validation/error_categorizer.py +164 -0
  64. kailash/runtime/validation/metrics.py +380 -0
  65. kailash/runtime/validation/performance.py +615 -0
  66. kailash/runtime/validation/suggestion_engine.py +212 -0
  67. kailash/testing/fixtures.py +2 -2
  68. kailash/workflow/builder.py +234 -8
  69. kailash/workflow/contracts.py +418 -0
  70. kailash/workflow/edge_infrastructure.py +369 -0
  71. kailash/workflow/migration.py +3 -3
  72. kailash/workflow/type_inference.py +669 -0
  73. {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/METADATA +44 -27
  74. {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/RECORD +78 -28
  75. kailash/nexus/__init__.py +0 -21
  76. kailash/nexus/cli/__init__.py +0 -5
  77. kailash/nexus/cli/__main__.py +0 -6
  78. kailash/nexus/cli/main.py +0 -176
  79. kailash/nexus/factory.py +0 -413
  80. kailash/nexus/gateway.py +0 -545
  81. {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/WHEEL +0 -0
  82. {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/entry_points.txt +0 -0
  83. {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/licenses/LICENSE +0 -0
  84. {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/top_level.txt +0 -0
kailash/edge/consistency.py (new file)
@@ -0,0 +1,609 @@
+"""Consistency models and managers for edge computing."""
+
+import asyncio
+import logging
+import time
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from datetime import datetime, timedelta
+from enum import Enum
+from typing import Any, Callable, Dict, List, Optional, Set
+
+
+class ConsistencyLevel(Enum):
+    """Consistency levels for distributed operations."""
+
+    ONE = 1  # At least one replica
+    QUORUM = 2  # Majority of replicas
+    ALL = 3  # All replicas
+    LOCAL_QUORUM = 4  # Quorum within local region
+    EACH_QUORUM = 5  # Quorum in each region
+
+
+@dataclass
+class ConsistencyMetrics:
+    """Metrics for consistency operations."""
+
+    writes_total: int = 0
+    writes_succeeded: int = 0
+    writes_failed: int = 0
+    reads_total: int = 0
+    reads_stale: int = 0
+    conflicts_detected: int = 0
+    conflicts_resolved: int = 0
+    average_replication_lag_ms: float = 0.0
+    max_replication_lag_ms: float = 0.0
+
+
+@dataclass
+class Version:
+    """Version information for distributed data."""
+
+    number: int
+    timestamp: datetime
+    edge_id: str
+    vector_clock: Dict[str, int] = field(default_factory=dict)
+
+    def is_newer_than(self, other: "Version") -> bool:
+        """Check if this version is newer than another."""
+        # First check vector clock for causal ordering
+        if self.vector_clock and other.vector_clock:
+            return self._dominates_vector_clock(other)
+
+        # Fall back to timestamp comparison
+        return self.timestamp > other.timestamp
+
+    def _dominates_vector_clock(self, other: "Version") -> bool:
+        """Check if this version dominates another via vector clock."""
+        at_least_one_greater = False
+
+        for node_id in set(self.vector_clock.keys()) | set(other.vector_clock.keys()):
+            self_val = self.vector_clock.get(node_id, 0)
+            other_val = other.vector_clock.get(node_id, 0)
+
+            if self_val < other_val:
+                return False
+            elif self_val > other_val:
+                at_least_one_greater = True
+
+        return at_least_one_greater
+
+
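The Version dataclass above prefers vector-clock dominance for ordering and only falls back to wall-clock timestamps when one side lacks a clock. A minimal sketch of that comparison, assuming the module is importable as kailash.edge.consistency (matching the file path in this diff):

    from datetime import datetime, timezone

    from kailash.edge.consistency import Version

    # v2 dominates v1: equal on edge-a, strictly greater on edge-b.
    v1 = Version(number=1, timestamp=datetime.now(timezone.utc), edge_id="edge-a",
                 vector_clock={"edge-a": 1, "edge-b": 1})
    v2 = Version(number=2, timestamp=datetime.now(timezone.utc), edge_id="edge-b",
                 vector_clock={"edge-a": 1, "edge-b": 2})

    assert v2.is_newer_than(v1)      # vector-clock dominance
    assert not v1.is_newer_than(v2)  # the older version does not dominate
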
+class ConsistencyManager(ABC):
+    """Abstract base class for consistency managers."""
+
+    def __init__(self):
+        self.metrics = ConsistencyMetrics()
+        self.logger = logging.getLogger(
+            f"{self.__class__.__module__}.{self.__class__.__name__}"
+        )
+
+    @abstractmethod
+    async def write(
+        self,
+        key: str,
+        value: Any,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.QUORUM,
+    ) -> bool:
+        """Write with consistency guarantees."""
+        pass
+
+    @abstractmethod
+    async def read(
+        self,
+        key: str,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.QUORUM,
+    ) -> Optional[Any]:
+        """Read with consistency guarantees."""
+        pass
+
+
+class StrongConsistencyManager(ConsistencyManager):
+    """Manager for strong consistency using Two-Phase Commit (2PC)."""
+
+    def __init__(self, write_callback: Callable, read_callback: Callable):
+        super().__init__()
+        self.write_callback = write_callback
+        self.read_callback = read_callback
+        self.prepared_writes: Dict[str, Set[str]] = {}
+
+    async def write(
+        self,
+        key: str,
+        value: Any,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.ALL,
+    ) -> bool:
+        """Write with strong consistency (2PC)."""
+        self.metrics.writes_total += 1
+
+        transaction_id = f"{key}:{time.time()}"
+        prepared_replicas = set()
+
+        try:
+            # Phase 1: Prepare
+            prepare_tasks = []
+            for replica in replicas:
+                prepare_tasks.append(
+                    self._prepare_write(transaction_id, replica, key, value)
+                )
+
+            prepare_results = await asyncio.gather(
+                *prepare_tasks, return_exceptions=True
+            )
+
+            # Check prepare results
+            for replica, result in zip(replicas, prepare_results):
+                if isinstance(result, Exception):
+                    self.logger.error(f"Prepare failed for {replica}: {result}")
+                elif result:
+                    prepared_replicas.add(replica)
+
+            # Check if we have enough replicas
+            if not self._check_consistency_level(
+                len(prepared_replicas), len(replicas), level
+            ):
+                # Abort transaction
+                await self._abort_transaction(transaction_id, prepared_replicas)
+                self.metrics.writes_failed += 1
+                return False
+
+            # Phase 2: Commit
+            commit_tasks = []
+            for replica in prepared_replicas:
+                commit_tasks.append(
+                    self._commit_write(transaction_id, replica, key, value)
+                )
+
+            await asyncio.gather(*commit_tasks, return_exceptions=True)
+
+            self.metrics.writes_succeeded += 1
+            return True
+
+        except Exception as e:
+            self.logger.error(f"2PC write failed: {e}")
+            await self._abort_transaction(transaction_id, prepared_replicas)
+            self.metrics.writes_failed += 1
+            return False
+
+    async def read(
+        self,
+        key: str,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.ALL,
+    ) -> Optional[Any]:
+        """Read with strong consistency."""
+        self.metrics.reads_total += 1
+
+        # Read from all replicas
+        read_tasks = []
+        for replica in replicas:
+            read_tasks.append(self.read_callback(replica, key))
+
+        results = await asyncio.gather(*read_tasks, return_exceptions=True)
+
+        # Filter valid results
+        valid_results = []
+        for result in results:
+            if not isinstance(result, Exception) and result is not None:
+                valid_results.append(result)
+
+        if not valid_results:
+            return None
+
+        # For strong consistency, all must agree
+        first_value = valid_results[0]
+        if all(r == first_value for r in valid_results):
+            return first_value
+        else:
+            # Inconsistency detected
+            self.metrics.conflicts_detected += 1
+            # Return most recent value
+            return max(valid_results, key=lambda x: x.get("timestamp", 0))
+
+    async def _prepare_write(
+        self, transaction_id: str, replica: str, key: str, value: Any
+    ) -> bool:
+        """Prepare phase of 2PC."""
+        # Simulate prepare (in production, this would be an RPC)
+        await asyncio.sleep(0.01)
+        return True
+
+    async def _commit_write(
+        self, transaction_id: str, replica: str, key: str, value: Any
+    ) -> bool:
+        """Commit phase of 2PC."""
+        return await self.write_callback(replica, key, value)
+
+    async def _abort_transaction(
+        self, transaction_id: str, prepared_replicas: Set[str]
+    ):
+        """Abort a prepared transaction."""
+        abort_tasks = []
+        for replica in prepared_replicas:
+            abort_tasks.append(self._abort_replica(transaction_id, replica))
+
+        await asyncio.gather(*abort_tasks, return_exceptions=True)
+
+    async def _abort_replica(self, transaction_id: str, replica: str):
+        """Abort on a single replica."""
+        await asyncio.sleep(0.01)  # Simulate abort
+
+    def _check_consistency_level(
+        self, successful: int, total: int, level: ConsistencyLevel
+    ) -> bool:
+        """Check if consistency level is satisfied."""
+        if level == ConsistencyLevel.ONE:
+            return successful >= 1
+        elif level == ConsistencyLevel.QUORUM:
+            return successful > total // 2
+        elif level == ConsistencyLevel.ALL:
+            return successful == total
+        else:
+            # For LOCAL_QUORUM and EACH_QUORUM, simplified check
+            return successful > total // 2
+
+
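A minimal usage sketch for the two-phase-commit manager above. The dict-backed replicas and callbacks are illustrative stand-ins for real edge stores, not kailash APIs; only the StrongConsistencyManager interface comes from the diff:

    import asyncio

    from kailash.edge.consistency import ConsistencyLevel, StrongConsistencyManager

    stores = {"edge-1": {}, "edge-2": {}, "edge-3": {}}  # stand-in replicas

    async def write_cb(replica: str, key: str, value) -> bool:
        stores[replica][key] = value
        return True

    async def read_cb(replica: str, key: str):
        return stores[replica].get(key)

    async def main() -> None:
        mgr = StrongConsistencyManager(write_cb, read_cb)
        ok = await mgr.write("user:42", {"name": "Ada"}, list(stores), level=ConsistencyLevel.ALL)
        print(ok, await mgr.read("user:42", list(stores)))  # True {'name': 'Ada'}

    asyncio.run(main())

Under ConsistencyLevel.ALL, a replica that fails to prepare causes the whole transaction to abort and the write to return False without committing anywhere.
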
+class EventualConsistencyManager(ConsistencyManager):
+    """Manager for eventual consistency with async replication."""
+
+    def __init__(self, write_callback: Callable, read_callback: Callable):
+        super().__init__()
+        self.write_callback = write_callback
+        self.read_callback = read_callback
+        self.replication_lag: Dict[str, float] = {}
+
+    async def write(
+        self,
+        key: str,
+        value: Any,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.ONE,
+    ) -> bool:
+        """Write with eventual consistency."""
+        self.metrics.writes_total += 1
+
+        # Write to primary first
+        primary = replicas[0] if replicas else None
+        if not primary:
+            self.metrics.writes_failed += 1
+            return False
+
+        try:
+            # Write to primary
+            success = await self.write_callback(primary, key, value)
+            if not success:
+                self.metrics.writes_failed += 1
+                return False
+
+            # Async replication to secondaries
+            if len(replicas) > 1:
+                asyncio.create_task(self._replicate_async(key, value, replicas[1:]))
+
+            self.metrics.writes_succeeded += 1
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Eventual write failed: {e}")
+            self.metrics.writes_failed += 1
+            return False
+
+    async def read(
+        self,
+        key: str,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.ONE,
+    ) -> Optional[Any]:
+        """Read with eventual consistency."""
+        self.metrics.reads_total += 1
+
+        # Read from any available replica
+        for replica in replicas:
+            try:
+                result = await self.read_callback(replica, key)
+                if result is not None:
+                    # Check staleness
+                    if self._is_stale(replica, result):
+                        self.metrics.reads_stale += 1
+
+                    return result
+            except Exception:
+                continue
+
+        return None
+
+    async def _replicate_async(self, key: str, value: Any, replicas: List[str]):
+        """Asynchronously replicate to secondary replicas."""
+        start_time = time.time()
+
+        tasks = []
+        for replica in replicas:
+            tasks.append(self.write_callback(replica, key, value))
+
+        results = await asyncio.gather(*tasks, return_exceptions=True)
+
+        # Track replication lag
+        lag = (time.time() - start_time) * 1000  # ms
+        for replica in replicas:
+            self.replication_lag[replica] = lag
+
+        # Update metrics
+        self._update_replication_metrics()
+
+    def _is_stale(self, replica: str, data: Dict[str, Any]) -> bool:
+        """Check if data from replica is stale."""
+        if "timestamp" not in data:
+            return False
+
+        data_age = time.time() - data["timestamp"]
+        return data_age > 5.0  # Consider stale if > 5 seconds
+
+    def _update_replication_metrics(self):
+        """Update replication lag metrics."""
+        if self.replication_lag:
+            lags = list(self.replication_lag.values())
+            self.metrics.average_replication_lag_ms = sum(lags) / len(lags)
+            self.metrics.max_replication_lag_ms = max(lags)
+
+
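The eventual-consistency manager acknowledges once the primary write succeeds and replicates to the remaining replicas in a background task, so secondaries may briefly lag. A short sketch with the same kind of illustrative in-memory callbacks:

    import asyncio

    from kailash.edge.consistency import EventualConsistencyManager

    stores = {"edge-1": {}, "edge-2": {}}

    async def write_cb(replica: str, key: str, value) -> bool:
        stores[replica][key] = value
        return True

    async def read_cb(replica: str, key: str):
        return stores[replica].get(key)

    async def main() -> None:
        mgr = EventualConsistencyManager(write_cb, read_cb)
        await mgr.write("sensor:7", {"temp": 21.5}, ["edge-1", "edge-2"])  # acks after primary
        await asyncio.sleep(0.05)  # let the background replication task run
        print(await mgr.read("sensor:7", ["edge-2"]))   # served by the secondary
        print(mgr.metrics.average_replication_lag_ms)   # lag recorded per replica

    asyncio.run(main())
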
+class CausalConsistencyManager(ConsistencyManager):
+    """Manager for causal consistency with dependency tracking."""
+
+    def __init__(self, write_callback: Callable, read_callback: Callable):
+        super().__init__()
+        self.write_callback = write_callback
+        self.read_callback = read_callback
+        self.vector_clocks: Dict[str, Dict[str, int]] = {}
+        self.causal_dependencies: Dict[str, Set[str]] = {}
+
+    async def write(
+        self,
+        key: str,
+        value: Any,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.QUORUM,
+    ) -> bool:
+        """Write with causal consistency."""
+        self.metrics.writes_total += 1
+
+        # Update vector clock
+        node_id = replicas[0]  # Primary
+        if key not in self.vector_clocks:
+            self.vector_clocks[key] = {}
+
+        current_clock = self.vector_clocks[key].get(node_id, 0)
+        self.vector_clocks[key][node_id] = current_clock + 1
+
+        # Add causal metadata
+        causal_value = {
+            "data": value,
+            "vector_clock": self.vector_clocks[key].copy(),
+            "dependencies": list(self.causal_dependencies.get(key, set())),
+            "timestamp": time.time(),
+        }
+
+        # Write with causal ordering
+        success_count = 0
+        write_tasks = []
+
+        for replica in replicas:
+            write_tasks.append(
+                self._write_with_dependencies(replica, key, causal_value)
+            )
+
+        results = await asyncio.gather(*write_tasks, return_exceptions=True)
+
+        for result in results:
+            if not isinstance(result, Exception) and result:
+                success_count += 1
+
+        if self._check_consistency_level(success_count, len(replicas), level):
+            self.metrics.writes_succeeded += 1
+            return True
+        else:
+            self.metrics.writes_failed += 1
+            return False
+
+    async def read(
+        self,
+        key: str,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.QUORUM,
+    ) -> Optional[Any]:
+        """Read with causal consistency."""
+        self.metrics.reads_total += 1
+
+        # Read from multiple replicas
+        read_tasks = []
+        for replica in replicas:
+            read_tasks.append(self.read_callback(replica, key))
+
+        results = await asyncio.gather(*read_tasks, return_exceptions=True)
+
+        # Filter valid results with causal metadata
+        valid_results = []
+        for result in results:
+            if not isinstance(result, Exception) and result is not None:
+                valid_results.append(result)
+
+        if not valid_results:
+            return None
+
+        # Select causally consistent value
+        return self._select_causal_value(valid_results)
+
+    async def _write_with_dependencies(
+        self, replica: str, key: str, causal_value: Dict[str, Any]
+    ) -> bool:
+        """Write ensuring causal dependencies are satisfied."""
+        # Check if dependencies are satisfied on replica
+        deps = causal_value.get("dependencies", [])
+
+        for dep_key in deps:
+            dep_result = await self.read_callback(replica, dep_key)
+            if dep_result is None:
+                # Dependency not satisfied, delay write
+                await asyncio.sleep(0.1)
+                # Retry once
+                dep_result = await self.read_callback(replica, dep_key)
+                if dep_result is None:
+                    return False
+
+        # Dependencies satisfied, proceed with write
+        return await self.write_callback(replica, key, causal_value)
+
+    def _select_causal_value(self, results: List[Dict[str, Any]]) -> Any:
+        """Select the causally most recent value."""
+        if not results:
+            return None
+
+        # Find value with highest vector clock
+        best_result = results[0]
+        best_clock = best_result.get("vector_clock", {})
+
+        for result in results[1:]:
+            result_clock = result.get("vector_clock", {})
+            if self._dominates(result_clock, best_clock):
+                best_result = result
+                best_clock = result_clock
+
+        return best_result.get("data")
+
+    def _dominates(self, clock1: Dict[str, int], clock2: Dict[str, int]) -> bool:
+        """Check if clock1 dominates clock2."""
+        at_least_one_greater = False
+
+        all_nodes = set(clock1.keys()) | set(clock2.keys())
+        for node in all_nodes:
+            val1 = clock1.get(node, 0)
+            val2 = clock2.get(node, 0)
+
+            if val1 < val2:
+                return False
+            elif val1 > val2:
+                at_least_one_greater = True
+
+        return at_least_one_greater
+
+    def _check_consistency_level(
+        self, successful: int, total: int, level: ConsistencyLevel
+    ) -> bool:
+        """Check if consistency level is satisfied."""
+        if level == ConsistencyLevel.ONE:
+            return successful >= 1
+        elif level == ConsistencyLevel.QUORUM:
+            return successful > total // 2
+        elif level == ConsistencyLevel.ALL:
+            return successful == total
+        else:
+            return successful > total // 2
+
+
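The causal manager wraps every write in an envelope carrying a per-key vector clock plus any tracked dependencies, and a read returns the "data" field of the causally dominating envelope. A brief sketch, again with illustrative in-memory callbacks:

    import asyncio

    from kailash.edge.consistency import CausalConsistencyManager

    stores = {"edge-1": {}, "edge-2": {}, "edge-3": {}}

    async def write_cb(replica: str, key: str, value) -> bool:
        stores[replica][key] = value  # value arrives as the causal envelope (a dict)
        return True

    async def read_cb(replica: str, key: str):
        return stores[replica].get(key)

    async def main() -> None:
        mgr = CausalConsistencyManager(write_cb, read_cb)
        await mgr.write("cart:9", {"items": 1}, list(stores))
        await mgr.write("cart:9", {"items": 2}, list(stores))  # clock advances on the primary
        print(await mgr.read("cart:9", list(stores)))          # -> {'items': 2}

    asyncio.run(main())
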
+class BoundedStalenessManager(ConsistencyManager):
+    """Manager for bounded staleness consistency."""
+
+    def __init__(
+        self,
+        write_callback: Callable,
+        read_callback: Callable,
+        max_staleness_ms: int = 5000,
+    ):
+        super().__init__()
+        self.write_callback = write_callback
+        self.read_callback = read_callback
+        self.max_staleness_ms = max_staleness_ms
+        self.write_timestamps: Dict[str, float] = {}
+
+    async def write(
+        self,
+        key: str,
+        value: Any,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.QUORUM,
+    ) -> bool:
+        """Write with bounded staleness."""
+        self.metrics.writes_total += 1
+
+        # Add timestamp
+        timestamped_value = {
+            "data": value,
+            "write_timestamp": time.time(),
+            "primary_replica": replicas[0] if replicas else None,
+        }
+
+        # Track write time
+        self.write_timestamps[key] = timestamped_value["write_timestamp"]
+
+        # Write to replicas
+        success_count = 0
+        write_tasks = []
+
+        for replica in replicas:
+            write_tasks.append(self.write_callback(replica, key, timestamped_value))
+
+        results = await asyncio.gather(*write_tasks, return_exceptions=True)
+
+        for result in results:
+            if not isinstance(result, Exception) and result:
+                success_count += 1
+
+        if self._check_consistency_level(success_count, len(replicas), level):
+            self.metrics.writes_succeeded += 1
+            return True
+        else:
+            self.metrics.writes_failed += 1
+            return False
+
+    async def read(
+        self,
+        key: str,
+        replicas: List[str],
+        level: ConsistencyLevel = ConsistencyLevel.ONE,
+    ) -> Optional[Any]:
+        """Read with bounded staleness guarantee."""
+        self.metrics.reads_total += 1
+
+        # Try to read from replicas in order
+        for replica in replicas:
+            try:
+                result = await self.read_callback(replica, key)
+                if result is None:
+                    continue
+
+                # Check staleness
+                write_timestamp = result.get("write_timestamp", 0)
+                staleness_ms = (time.time() - write_timestamp) * 1000
+
+                if staleness_ms <= self.max_staleness_ms:
+                    # Within bounds
+                    return result.get("data")
+                else:
+                    # Too stale, try to refresh
+                    self.metrics.reads_stale += 1
+
+                    # Try primary replica
+                    primary = result.get("primary_replica")
+                    if primary and primary != replica:
+                        fresh_result = await self.read_callback(primary, key)
+                        if fresh_result:
+                            return fresh_result.get("data")
+
+            except Exception:
+                continue
+
+        return None
+
+    def _check_consistency_level(
+        self, successful: int, total: int, level: ConsistencyLevel
+    ) -> bool:
+        """Check if consistency level is satisfied."""
+        if level == ConsistencyLevel.ONE:
+            return successful >= 1
+        elif level == ConsistencyLevel.QUORUM:
+            return successful > total // 2
+        elif level == ConsistencyLevel.ALL:
+            return successful == total
+        else:
+            return successful > total // 2
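Bounded staleness sits between the strong and eventual extremes above: a read is served from any replica whose stored write timestamp is within max_staleness_ms, otherwise it is counted as stale and retried against the recorded primary. A short sketch with illustrative in-memory callbacks and a deliberately tight bound:

    import asyncio

    from kailash.edge.consistency import BoundedStalenessManager

    stores = {"edge-1": {}, "edge-2": {}}

    async def write_cb(replica: str, key: str, value) -> bool:
        stores[replica][key] = value  # value is a dict with data/write_timestamp/primary_replica
        return True

    async def read_cb(replica: str, key: str):
        return stores[replica].get(key)

    async def main() -> None:
        mgr = BoundedStalenessManager(write_cb, read_cb, max_staleness_ms=100)
        await mgr.write("cfg:region", {"zone": "eu-west"}, ["edge-1", "edge-2"])
        print(await mgr.read("cfg:region", ["edge-2"]))  # fresh enough -> {'zone': 'eu-west'}
        await asyncio.sleep(0.2)                         # exceed the 100 ms bound
        print(await mgr.read("cfg:region", ["edge-2"]))  # counted stale, refetched via the primary
        print(mgr.metrics.reads_stale)                   # -> 1

    asyncio.run(main())
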
kailash/edge/coordination/__init__.py (new file)
@@ -0,0 +1,30 @@
+"""Edge coordination components for distributed consensus and ordering."""
+
+from .global_ordering import GlobalOrderingService, HybridLogicalClock
+from .leader_election import EdgeLeaderElection
+from .partition_detector import PartitionDetector
+from .raft import (
+    AppendEntriesRequest,
+    AppendEntriesResponse,
+    LogEntry,
+    PersistentState,
+    RaftNode,
+    RaftState,
+    RequestVoteRequest,
+    RequestVoteResponse,
+)
+
+__all__ = [
+    "RaftNode",
+    "RaftState",
+    "LogEntry",
+    "PersistentState",
+    "RequestVoteRequest",
+    "RequestVoteResponse",
+    "AppendEntriesRequest",
+    "AppendEntriesResponse",
+    "EdgeLeaderElection",
+    "GlobalOrderingService",
+    "HybridLogicalClock",
+    "PartitionDetector",
+]
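For the new coordination package, the public surface is exactly the __all__ list above. The constructor signatures are not part of this hunk, so only the import path, which follows the package layout added in 0.8.5, is sketched here:

    from kailash.edge.coordination import (  # re-exports defined in the __init__ above
        EdgeLeaderElection,
        GlobalOrderingService,
        HybridLogicalClock,
        PartitionDetector,
        RaftNode,
    )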