kailash 0.8.4__py3-none-any.whl → 0.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -7
- kailash/cli/__init__.py +11 -1
- kailash/cli/validation_audit.py +570 -0
- kailash/core/actors/supervisor.py +1 -1
- kailash/core/resilience/circuit_breaker.py +71 -1
- kailash/core/resilience/health_monitor.py +172 -0
- kailash/edge/compliance.py +33 -0
- kailash/edge/consistency.py +609 -0
- kailash/edge/coordination/__init__.py +30 -0
- kailash/edge/coordination/global_ordering.py +355 -0
- kailash/edge/coordination/leader_election.py +217 -0
- kailash/edge/coordination/partition_detector.py +296 -0
- kailash/edge/coordination/raft.py +485 -0
- kailash/edge/discovery.py +63 -1
- kailash/edge/migration/__init__.py +19 -0
- kailash/edge/migration/edge_migrator.py +832 -0
- kailash/edge/monitoring/__init__.py +21 -0
- kailash/edge/monitoring/edge_monitor.py +736 -0
- kailash/edge/prediction/__init__.py +10 -0
- kailash/edge/prediction/predictive_warmer.py +591 -0
- kailash/edge/resource/__init__.py +102 -0
- kailash/edge/resource/cloud_integration.py +796 -0
- kailash/edge/resource/cost_optimizer.py +949 -0
- kailash/edge/resource/docker_integration.py +919 -0
- kailash/edge/resource/kubernetes_integration.py +893 -0
- kailash/edge/resource/platform_integration.py +913 -0
- kailash/edge/resource/predictive_scaler.py +959 -0
- kailash/edge/resource/resource_analyzer.py +824 -0
- kailash/edge/resource/resource_pools.py +610 -0
- kailash/integrations/dataflow_edge.py +261 -0
- kailash/mcp_server/registry_integration.py +1 -1
- kailash/monitoring/__init__.py +18 -0
- kailash/monitoring/alerts.py +646 -0
- kailash/monitoring/metrics.py +677 -0
- kailash/nodes/__init__.py +2 -0
- kailash/nodes/ai/semantic_memory.py +2 -2
- kailash/nodes/base.py +545 -0
- kailash/nodes/edge/__init__.py +36 -0
- kailash/nodes/edge/base.py +240 -0
- kailash/nodes/edge/cloud_node.py +710 -0
- kailash/nodes/edge/coordination.py +239 -0
- kailash/nodes/edge/docker_node.py +825 -0
- kailash/nodes/edge/edge_data.py +582 -0
- kailash/nodes/edge/edge_migration_node.py +392 -0
- kailash/nodes/edge/edge_monitoring_node.py +421 -0
- kailash/nodes/edge/edge_state.py +673 -0
- kailash/nodes/edge/edge_warming_node.py +393 -0
- kailash/nodes/edge/kubernetes_node.py +652 -0
- kailash/nodes/edge/platform_node.py +766 -0
- kailash/nodes/edge/resource_analyzer_node.py +378 -0
- kailash/nodes/edge/resource_optimizer_node.py +501 -0
- kailash/nodes/edge/resource_scaler_node.py +397 -0
- kailash/nodes/ports.py +676 -0
- kailash/runtime/local.py +344 -1
- kailash/runtime/validation/__init__.py +20 -0
- kailash/runtime/validation/connection_context.py +119 -0
- kailash/runtime/validation/enhanced_error_formatter.py +202 -0
- kailash/runtime/validation/error_categorizer.py +164 -0
- kailash/runtime/validation/metrics.py +380 -0
- kailash/runtime/validation/performance.py +615 -0
- kailash/runtime/validation/suggestion_engine.py +212 -0
- kailash/testing/fixtures.py +2 -2
- kailash/workflow/builder.py +230 -4
- kailash/workflow/contracts.py +418 -0
- kailash/workflow/edge_infrastructure.py +369 -0
- kailash/workflow/migration.py +3 -3
- kailash/workflow/type_inference.py +669 -0
- {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/METADATA +43 -27
- {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/RECORD +73 -27
- kailash/nexus/__init__.py +0 -21
- kailash/nexus/cli/__init__.py +0 -5
- kailash/nexus/cli/__main__.py +0 -6
- kailash/nexus/cli/main.py +0 -176
- kailash/nexus/factory.py +0 -413
- kailash/nexus/gateway.py +0 -545
- {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/WHEEL +0 -0
- {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/entry_points.txt +0 -0
- {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.8.4.dist-info → kailash-0.8.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,485 @@
|
|
1
|
+
"""Raft consensus protocol implementation for edge coordination."""
|
2
|
+
|
3
|
+
import asyncio
|
4
|
+
import logging
|
5
|
+
import random
|
6
|
+
from dataclasses import dataclass
|
7
|
+
from datetime import datetime, timedelta
|
8
|
+
from enum import Enum
|
9
|
+
from typing import Any, Callable, Dict, List, Optional
|
10
|
+
|
11
|
+
|
12
|
+
class RaftState(Enum):
    """Raft node states.

    A node is always in exactly one of these roles; transitions are
    driven by election timeouts and RPC terms (see RaftNode).
    """

    FOLLOWER = "follower"  # passive: responds to leader/candidate RPCs
    CANDIDATE = "candidate"  # campaigning for votes after an election timeout
    LEADER = "leader"  # accepts proposals and replicates the log via heartbeats
|
18
|
+
|
19
|
+
|
20
|
+
@dataclass
class LogEntry:
    """Entry in the Raft log."""

    term: int  # leader term under which the entry was appended
    index: int  # 1-based position in the log (propose() uses len(log) + 1)
    command: Dict[str, Any]  # opaque state-machine command carried by the entry
|
27
|
+
|
28
|
+
|
29
|
+
@dataclass
class PersistentState:
    """Persistent state that must survive restarts.

    Snapshot container produced by RaftNode._save_persistent_state and
    consumed by RaftNode._load_persistent_state.
    """

    current_term: int  # latest term this node has seen
    voted_for: Optional[str]  # candidate voted for in current_term, if any
    log: List[LogEntry]  # full replicated log (copied, not shared)
|
36
|
+
|
37
|
+
|
38
|
+
@dataclass
class RequestVoteRequest:
    """Request vote RPC request."""

    term: int  # candidate's term
    candidate_id: str  # node requesting the vote
    last_log_index: int  # index of the candidate's last log entry
    last_log_term: int  # term of the candidate's last log entry (0 if log empty)
|
46
|
+
|
47
|
+
|
48
|
+
@dataclass
class RequestVoteResponse:
    """Request vote RPC response."""

    term: int  # responder's current term, so the candidate can step down
    vote_granted: bool  # True when the responder voted for the candidate
|
54
|
+
|
55
|
+
|
56
|
+
@dataclass
class AppendEntriesRequest:
    """Append entries RPC request.

    Doubles as the heartbeat when ``entries`` is empty.
    """

    term: int  # leader's term
    leader_id: str  # so followers can redirect clients
    prev_log_index: int  # index of the entry immediately preceding the new ones
    prev_log_term: int  # term of that preceding entry (consistency check)
    entries: List[LogEntry]  # entries to append (empty for pure heartbeat)
    leader_commit: int  # leader's commit index
|
66
|
+
|
67
|
+
|
68
|
+
@dataclass
class AppendEntriesResponse:
    """Append entries RPC response."""

    term: int  # responder's current term, so the leader can step down
    success: bool  # True when the follower matched prev_log_index/prev_log_term
|
74
|
+
|
75
|
+
|
76
|
+
class RaftNode:
    """Raft consensus node implementation.

    Holds the protocol state machine (follower/candidate/leader), the
    replicated log, and the background tasks that drive elections and
    heartbeats.  Network transport is delegated to the injected
    ``rpc_handler``; without one the node never reaches its peers and
    effectively runs standalone (a single-node cluster still elects
    itself leader).
    """

    def __init__(
        self,
        node_id: str,
        peers: List[str],
        election_timeout_ms: int = 150,
        heartbeat_interval_ms: int = 50,
        rpc_handler: Optional[Callable] = None,
    ):
        """Initialize Raft node.

        Args:
            node_id: Unique identifier for this node
            peers: List of peer node IDs (excluding this node)
            election_timeout_ms: Base election timeout in milliseconds;
                the actual timeout is randomized in [T, 2T]
            heartbeat_interval_ms: Heartbeat interval in milliseconds
            rpc_handler: Optional async callable ``(peer, message) -> response``
                used for all peer communication
        """
        self.node_id = node_id
        self.peers = peers
        self.election_timeout_ms = election_timeout_ms
        self.heartbeat_interval_ms = heartbeat_interval_ms
        self._send_rpc = rpc_handler

        # Persistent state (see PersistentState; survives via
        # _save_persistent_state / _load_persistent_state)
        self.current_term = 0
        self.voted_for: Optional[str] = None
        self.log: List[LogEntry] = []

        # Volatile state
        self.state = RaftState.FOLLOWER
        self.commit_index = 0
        self.last_applied = 0

        # Leader-only state, reinitialized on every election win
        self.next_index: Dict[str, int] = {}
        self.match_index: Dict[str, int] = {}

        # Other state
        self.leader_id: Optional[str] = None
        self.last_heartbeat = datetime.now()
        self.votes_received = 0

        # Background tasks
        self._election_task: Optional[asyncio.Task] = None
        self._heartbeat_task: Optional[asyncio.Task] = None
        self._running = False

        self.logger = logging.getLogger(f"RaftNode[{node_id}]")

    async def start(self):
        """Start the Raft node's election-timeout loop."""
        self._running = True
        self.last_heartbeat = datetime.now()

        # Start election timeout task
        self._election_task = asyncio.create_task(self._election_timeout_loop())

        self.logger.info(f"Started as {self.state.value}")

    async def stop(self):
        """Stop the Raft node and cancel its background tasks."""
        self._running = False

        if self._election_task:
            self._election_task.cancel()
            try:
                await self._election_task
            except asyncio.CancelledError:
                pass

        if self._heartbeat_task:
            self._heartbeat_task.cancel()
            try:
                await self._heartbeat_task
            except asyncio.CancelledError:
                pass

        self.logger.info("Stopped")

    async def propose(self, command: Dict[str, Any]) -> Dict[str, Any]:
        """Propose a command to the Raft cluster.

        Args:
            command: Command to propose

        Returns:
            Dict with success status and details.  Note: success means the
            entry was appended locally and one replication round was
            attempted; it does NOT wait for a majority commit.
        """
        if self.state != RaftState.LEADER:
            return {
                "success": False,
                "error": "Not the leader",
                "leader": self.leader_id,
            }

        # Append to log (indices are 1-based)
        entry = LogEntry(
            term=self.current_term, index=len(self.log) + 1, command=command
        )
        self.log.append(entry)

        # Replicate to followers
        await self._replicate_log()

        return {"success": True, "index": entry.index, "term": entry.term}

    async def handle_request_vote(
        self, request: RequestVoteRequest
    ) -> RequestVoteResponse:
        """Handle request vote RPC."""
        # A newer term always demotes us to follower and clears our vote
        if request.term > self.current_term:
            self.current_term = request.term
            self.voted_for = None
            self._become_follower()

        # Check if we can grant vote
        vote_granted = False

        if request.term < self.current_term:
            # Reject old term
            pass
        elif self.voted_for is None or self.voted_for == request.candidate_id:
            # Grant only if the candidate's log is at least as up-to-date
            if self._is_log_up_to_date(request.last_log_index, request.last_log_term):
                vote_granted = True
                self.voted_for = request.candidate_id
                # Granting a vote also resets our election timeout
                self.last_heartbeat = datetime.now()

        return RequestVoteResponse(term=self.current_term, vote_granted=vote_granted)

    async def handle_append_entries(
        self, request: AppendEntriesRequest
    ) -> AppendEntriesResponse:
        """Handle append entries RPC (also serves as the heartbeat)."""
        # A newer term always demotes us to follower
        if request.term > self.current_term:
            self.current_term = request.term
            self.voted_for = None
            self._become_follower()

        # Reject if term is old.  This must happen BEFORE resetting the
        # election timeout or accepting the sender as leader, otherwise a
        # deposed leader's stale heartbeats would postpone new elections
        # indefinitely.
        if request.term < self.current_term:
            return AppendEntriesResponse(term=self.current_term, success=False)

        # Legitimate current-term leader: reset election timeout
        self.last_heartbeat = datetime.now()

        # Accept leader
        self.leader_id = request.leader_id
        if self.state == RaftState.CANDIDATE:
            self._become_follower()

        # Check log consistency against (prev_log_index, prev_log_term)
        if request.prev_log_index > 0:
            if request.prev_log_index > len(self.log):
                return AppendEntriesResponse(term=self.current_term, success=False)

            prev_entry = self.log[request.prev_log_index - 1]
            if prev_entry.term != request.prev_log_term:
                # Delete conflicting entries so the leader can back up
                self.log = self.log[: request.prev_log_index - 1]
                return AppendEntriesResponse(term=self.current_term, success=False)

        # Append new entries, overwriting anything after prev_log_index
        if request.entries:
            self.log = self.log[: request.prev_log_index]
            self.log.extend(request.entries)

        # Advance commit index, bounded by what we actually hold
        if request.leader_commit > self.commit_index:
            self.commit_index = min(request.leader_commit, len(self.log))

        return AppendEntriesResponse(term=self.current_term, success=True)

    def _become_follower(self):
        """Transition to follower state."""
        self.state = RaftState.FOLLOWER
        self.votes_received = 0

        # Cancel heartbeat task if we were leader
        if self._heartbeat_task:
            self._heartbeat_task.cancel()
            self._heartbeat_task = None

        self.logger.info(f"Became follower in term {self.current_term}")

    def _become_candidate(self):
        """Transition to candidate state and start a new term."""
        self.state = RaftState.CANDIDATE
        self.current_term += 1
        self.voted_for = self.node_id
        self.votes_received = 1  # Vote for self
        self.leader_id = None

        self.logger.info(f"Became candidate in term {self.current_term}")

    def _become_leader(self):
        """Transition to leader state and start heartbeats."""
        self.state = RaftState.LEADER
        self.leader_id = self.node_id

        # Initialize per-peer replication bookkeeping
        for peer in self.peers:
            self.next_index[peer] = len(self.log) + 1
            self.match_index[peer] = 0

        # Start heartbeat task (replacing any stale one)
        if self._heartbeat_task:
            self._heartbeat_task.cancel()
        self._heartbeat_task = asyncio.create_task(self._heartbeat_loop())

        self.logger.info(f"Became leader in term {self.current_term}")

    async def _election_timeout_loop(self):
        """Background task for election timeout."""
        while self._running:
            try:
                if self.state != RaftState.LEADER:
                    # Check election timeout
                    if self._election_timeout_elapsed():
                        await self._start_election()
            except Exception as e:
                self.logger.error(f"Election loop error: {e}")
            # Sleep outside the try so a repeated failure cannot turn the
            # loop into a busy spin.
            await asyncio.sleep(0.01)  # 10ms

    async def _heartbeat_loop(self):
        """Background task for sending heartbeats while leader."""
        while self._running and self.state == RaftState.LEADER:
            try:
                await self._send_heartbeats()
            except Exception as e:
                self.logger.error(f"Heartbeat loop error: {e}")
            # Always pace the loop, even after an error
            await asyncio.sleep(self.heartbeat_interval_ms / 1000)

    async def _start_election(self):
        """Start leader election."""
        self._become_candidate()
        self.last_heartbeat = datetime.now()

        # Request votes from all peers
        await self._collect_votes()

    async def _collect_votes(self):
        """Collect votes from peers and promote to leader on majority."""
        # Prepare request from our last log position
        last_log_index = len(self.log)
        last_log_term = self.log[-1].term if self.log else 0

        request = RequestVoteRequest(
            term=self.current_term,
            candidate_id=self.node_id,
            last_log_index=last_log_index,
            last_log_term=last_log_term,
        )

        # Send vote requests
        tasks = []
        for peer in self.peers:
            if self._send_rpc:
                task = asyncio.create_task(self._send_request_vote(peer, request))
                tasks.append(task)

        # Collect responses
        if tasks:
            responses = await asyncio.gather(*tasks, return_exceptions=True)

            for response in responses:
                if isinstance(response, RequestVoteResponse):
                    if response.term > self.current_term:
                        self.current_term = response.term
                        self._become_follower()
                        return

                    if response.vote_granted and response.term == self.current_term:
                        self.votes_received += 1

        # Check if we won: strict majority of the full cluster (self
        # included).  Only promote if we are still a candidate — a
        # concurrent AppendEntries may have demoted us while gathering.
        if (
            self.state == RaftState.CANDIDATE
            and self.votes_received > (len(self.peers) + 1) // 2
        ):
            self._become_leader()
        else:
            # Split vote, will retry after timeout
            pass

    async def _send_request_vote(
        self, peer: str, request: RequestVoteRequest
    ) -> Optional[RequestVoteResponse]:
        """Send request vote RPC to peer (None when no transport)."""
        if self._send_rpc:
            return await self._send_rpc(
                peer, {"type": "request_vote", "request": request}
            )
        return None

    async def _send_heartbeats(self):
        """Send heartbeats (AppendEntries) to all peers concurrently."""
        tasks = []
        for peer in self.peers:
            task = asyncio.create_task(self._send_append_entries(peer))
            tasks.append(task)

        if tasks:
            await asyncio.gather(*tasks, return_exceptions=True)

    async def _send_append_entries(self, peer: str) -> Optional[AppendEntriesResponse]:
        """Send append entries RPC to peer and process its response."""
        prev_log_index = self.next_index.get(peer, 1) - 1
        prev_log_term = 0

        if prev_log_index > 0 and prev_log_index <= len(self.log):
            prev_log_term = self.log[prev_log_index - 1].term

        # Get entries to send (everything the peer is missing)
        entries = []
        if prev_log_index < len(self.log):
            entries = self.log[prev_log_index:]

        request = AppendEntriesRequest(
            term=self.current_term,
            leader_id=self.node_id,
            prev_log_index=prev_log_index,
            prev_log_term=prev_log_term,
            entries=entries,
            leader_commit=self.commit_index,
        )

        if self._send_rpc:
            response = await self._send_rpc(
                peer, {"type": "append_entries", "request": request}
            )

            if response:
                if response.term > self.current_term:
                    # Peer is ahead of us: step down
                    self.current_term = response.term
                    self._become_follower()
                elif response.success:
                    # Update replication bookkeeping
                    if entries:
                        self.match_index[peer] = prev_log_index + len(entries)
                        self.next_index[peer] = self.match_index[peer] + 1
                else:
                    # Consistency check failed: back up one entry and retry
                    # on the next heartbeat.  Use .get to tolerate a peer
                    # missing from the map (matches the read path above).
                    self.next_index[peer] = max(1, self.next_index.get(peer, 1) - 1)

            return response
        return None

    async def _replicate_log(self):
        """Replicate log entries to followers and advance the commit index."""
        await self._send_heartbeats()
        await self._update_commit_index()

    async def _update_commit_index(self):
        """Advance commit_index to the highest majority-replicated entry."""
        if self.state != RaftState.LEADER:
            return

        # Scan from the newest entry down; only entries from the current
        # term may be committed by counting replicas (Raft §5.4.2).
        for n in range(len(self.log), self.commit_index, -1):
            if self.log[n - 1].term == self.current_term:
                # Count replicas
                replicas = 1  # Self
                for peer in self.peers:
                    if self.match_index.get(peer, 0) >= n:
                        replicas += 1

                if replicas > (len(self.peers) + 1) // 2:
                    self.commit_index = n
                    break

    def _election_timeout_elapsed(self) -> bool:
        """Check if election timeout has elapsed.

        NOTE(review): a fresh random timeout is sampled on every 10ms
        check, which biases the effective timeout toward the lower bound;
        consider sampling once per heartbeat reset instead.
        """
        timeout = self._randomize_election_timeout()
        elapsed = (datetime.now() - self.last_heartbeat).total_seconds() * 1000
        return elapsed > timeout

    def _randomize_election_timeout(self) -> int:
        """Get randomized election timeout in milliseconds."""
        # Randomize between T and 2T to break election ties
        return random.randint(self.election_timeout_ms, self.election_timeout_ms * 2)

    def _is_log_up_to_date(self, last_log_index: int, last_log_term: int) -> bool:
        """Check if candidate's log is at least as up-to-date as ours.

        Raft compares by last term first, then by log length.
        """
        our_last_index = len(self.log)
        our_last_term = self.log[-1].term if self.log else 0

        if last_log_term != our_last_term:
            return last_log_term > our_last_term

        return last_log_index >= our_last_index

    def _save_persistent_state(self) -> PersistentState:
        """Snapshot the persistent state (term, vote, log copy)."""
        return PersistentState(
            current_term=self.current_term,
            voted_for=self.voted_for,
            log=self.log.copy(),
        )

    def _load_persistent_state(self, state: PersistentState):
        """Restore persistent state from a snapshot."""
        self.current_term = state.current_term
        self.voted_for = state.voted_for
        self.log = state.log.copy()
|
kailash/edge/discovery.py
CHANGED
@@ -249,13 +249,27 @@ class EdgeDiscovery:
|
|
249
249
|
ai_models_available=["llama", "claude"],
|
250
250
|
)
|
251
251
|
|
252
|
+
# Handle compliance zones
|
253
|
+
compliance_zones = []
|
254
|
+
if "compliance_zones" in edge_config:
|
255
|
+
for zone_str in edge_config["compliance_zones"]:
|
256
|
+
try:
|
257
|
+
# Try to find matching enum
|
258
|
+
for cz in ComplianceZone:
|
259
|
+
if cz.value == zone_str:
|
260
|
+
compliance_zones.append(cz)
|
261
|
+
break
|
262
|
+
except Exception:
|
263
|
+
pass
|
264
|
+
|
252
265
|
# Create edge location
|
253
266
|
location = EdgeLocation(
|
254
267
|
location_id=location_id,
|
255
|
-
name=f"Edge {region_str.title()}",
|
268
|
+
name=edge_config.get("name", f"Edge {region_str.title()}"),
|
256
269
|
region=region,
|
257
270
|
coordinates=coordinates,
|
258
271
|
capabilities=capabilities,
|
272
|
+
compliance_zones=compliance_zones if compliance_zones else None,
|
259
273
|
endpoint_url=edge_config.get(
|
260
274
|
"endpoint", f"http://{location_id}.edge.local:8080"
|
261
275
|
),
|
@@ -743,3 +757,51 @@ class EdgeDiscovery:
|
|
743
757
|
|
744
758
|
def __iter__(self):
|
745
759
|
return iter(self.locations.values())
|
760
|
+
|
761
|
+
async def start_discovery(self):
    """Start edge discovery service.

    Currently a no-op kept so callers have a symmetric
    start_discovery/stop_discovery lifecycle interface.
    """
    # In a real implementation, this would start monitoring services
    # For now, it's a no-op as health checks are started in __init__
    pass
|
766
|
+
|
767
|
+
async def stop_discovery(self):
    """Stop edge discovery service.

    Cancels the background health-check task when one exists and waits
    for its cancellation to complete.
    """
    health_task = getattr(self, "_health_check_task", None)
    if not health_task:
        return
    health_task.cancel()
    try:
        await health_task
    except asyncio.CancelledError:
        # Expected: the task was cancelled by us just above.
        pass
|
776
|
+
|
777
|
+
def get_all_edges(self) -> List["EdgeLocation"]:
    """Return every registered edge location as a new list."""
    return [*self.locations.values()]
|
780
|
+
|
781
|
+
def get_edge(self, edge_name: str) -> Optional["EdgeLocation"]:
    """Look up a registered edge location by its human-readable name.

    Returns None when no registered edge carries the given name.
    """
    matches = (loc for loc in self.locations.values() if loc.name == edge_name)
    return next(matches, None)
|
787
|
+
|
788
|
+
async def select_edge(
    self,
    strategy: Optional["EdgeSelectionStrategy"] = None,
    compliance_zones: Optional[List[str]] = None,
) -> Optional["EdgeLocation"]:
    """Select an edge based on strategy and compliance zones.

    Args:
        strategy: Selection strategy (currently unused; first match wins).
        compliance_zones: Zone value names the selected edge should satisfy.

    Returns:
        A matching EdgeLocation, or None when no edges are registered.
        When no edge satisfies the requested zones, falls back to the
        unfiltered list rather than returning None.
    """
    # For now, just return the first available edge
    edges = self.get_all_edges()
    if not edges:
        return None

    # Filter by compliance if specified
    if compliance_zones:
        compliant_edges = []
        for edge in edges:
            # An edge's compliance_zones may be None (registration stores
            # None when no zones were configured) — treat that as "no
            # zones" instead of crashing on iteration.
            edge_zones = [z.value for z in (edge.compliance_zones or [])]
            if any(zone in edge_zones for zone in compliance_zones):
                compliant_edges.append(edge)
        edges = compliant_edges if compliant_edges else edges

    # For now, just return the first edge
    return edges[0] if edges else None
|
@@ -0,0 +1,19 @@
|
|
1
|
+
"""Edge migration module."""
|
2
|
+
|
3
|
+
from .edge_migrator import (
|
4
|
+
EdgeMigrator,
|
5
|
+
MigrationCheckpoint,
|
6
|
+
MigrationPhase,
|
7
|
+
MigrationPlan,
|
8
|
+
MigrationProgress,
|
9
|
+
MigrationStrategy,
|
10
|
+
)
|
11
|
+
|
12
|
+
__all__ = [
|
13
|
+
"EdgeMigrator",
|
14
|
+
"MigrationStrategy",
|
15
|
+
"MigrationPhase",
|
16
|
+
"MigrationPlan",
|
17
|
+
"MigrationProgress",
|
18
|
+
"MigrationCheckpoint",
|
19
|
+
]
|