kailash 0.8.3__py3-none-any.whl → 0.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -7
- kailash/cli/__init__.py +11 -1
- kailash/cli/validation_audit.py +570 -0
- kailash/core/actors/supervisor.py +1 -1
- kailash/core/resilience/circuit_breaker.py +71 -1
- kailash/core/resilience/health_monitor.py +172 -0
- kailash/edge/compliance.py +33 -0
- kailash/edge/consistency.py +609 -0
- kailash/edge/coordination/__init__.py +30 -0
- kailash/edge/coordination/global_ordering.py +355 -0
- kailash/edge/coordination/leader_election.py +217 -0
- kailash/edge/coordination/partition_detector.py +296 -0
- kailash/edge/coordination/raft.py +485 -0
- kailash/edge/discovery.py +63 -1
- kailash/edge/migration/__init__.py +19 -0
- kailash/edge/migration/edge_migrator.py +832 -0
- kailash/edge/monitoring/__init__.py +21 -0
- kailash/edge/monitoring/edge_monitor.py +736 -0
- kailash/edge/prediction/__init__.py +10 -0
- kailash/edge/prediction/predictive_warmer.py +591 -0
- kailash/edge/resource/__init__.py +102 -0
- kailash/edge/resource/cloud_integration.py +796 -0
- kailash/edge/resource/cost_optimizer.py +949 -0
- kailash/edge/resource/docker_integration.py +919 -0
- kailash/edge/resource/kubernetes_integration.py +893 -0
- kailash/edge/resource/platform_integration.py +913 -0
- kailash/edge/resource/predictive_scaler.py +959 -0
- kailash/edge/resource/resource_analyzer.py +824 -0
- kailash/edge/resource/resource_pools.py +610 -0
- kailash/integrations/dataflow_edge.py +261 -0
- kailash/mcp_server/registry_integration.py +1 -1
- kailash/monitoring/__init__.py +18 -0
- kailash/monitoring/alerts.py +646 -0
- kailash/monitoring/metrics.py +677 -0
- kailash/nodes/__init__.py +2 -0
- kailash/nodes/ai/__init__.py +17 -0
- kailash/nodes/ai/a2a.py +1914 -43
- kailash/nodes/ai/a2a_backup.py +1807 -0
- kailash/nodes/ai/hybrid_search.py +972 -0
- kailash/nodes/ai/semantic_memory.py +558 -0
- kailash/nodes/ai/streaming_analytics.py +947 -0
- kailash/nodes/base.py +545 -0
- kailash/nodes/edge/__init__.py +36 -0
- kailash/nodes/edge/base.py +240 -0
- kailash/nodes/edge/cloud_node.py +710 -0
- kailash/nodes/edge/coordination.py +239 -0
- kailash/nodes/edge/docker_node.py +825 -0
- kailash/nodes/edge/edge_data.py +582 -0
- kailash/nodes/edge/edge_migration_node.py +392 -0
- kailash/nodes/edge/edge_monitoring_node.py +421 -0
- kailash/nodes/edge/edge_state.py +673 -0
- kailash/nodes/edge/edge_warming_node.py +393 -0
- kailash/nodes/edge/kubernetes_node.py +652 -0
- kailash/nodes/edge/platform_node.py +766 -0
- kailash/nodes/edge/resource_analyzer_node.py +378 -0
- kailash/nodes/edge/resource_optimizer_node.py +501 -0
- kailash/nodes/edge/resource_scaler_node.py +397 -0
- kailash/nodes/ports.py +676 -0
- kailash/runtime/local.py +344 -1
- kailash/runtime/validation/__init__.py +20 -0
- kailash/runtime/validation/connection_context.py +119 -0
- kailash/runtime/validation/enhanced_error_formatter.py +202 -0
- kailash/runtime/validation/error_categorizer.py +164 -0
- kailash/runtime/validation/metrics.py +380 -0
- kailash/runtime/validation/performance.py +615 -0
- kailash/runtime/validation/suggestion_engine.py +212 -0
- kailash/testing/fixtures.py +2 -2
- kailash/workflow/builder.py +234 -8
- kailash/workflow/contracts.py +418 -0
- kailash/workflow/edge_infrastructure.py +369 -0
- kailash/workflow/migration.py +3 -3
- kailash/workflow/type_inference.py +669 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/METADATA +44 -27
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/RECORD +78 -28
- kailash/nexus/__init__.py +0 -21
- kailash/nexus/cli/__init__.py +0 -5
- kailash/nexus/cli/__main__.py +0 -6
- kailash/nexus/cli/main.py +0 -176
- kailash/nexus/factory.py +0 -413
- kailash/nexus/gateway.py +0 -545
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/WHEEL +0 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/entry_points.txt +0 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,393 @@
|
|
1
|
+
"""Edge warming node for predictive edge node preparation.
|
2
|
+
|
3
|
+
This node integrates predictive warming capabilities into workflows,
|
4
|
+
allowing automatic pre-warming of edge nodes based on usage patterns.
|
5
|
+
"""
|
6
|
+
|
7
|
+
import asyncio
|
8
|
+
from datetime import datetime
|
9
|
+
from typing import Any, Dict, List, Optional
|
10
|
+
|
11
|
+
from kailash.edge.prediction.predictive_warmer import (
|
12
|
+
PredictionStrategy,
|
13
|
+
PredictiveWarmer,
|
14
|
+
UsagePattern,
|
15
|
+
WarmingDecision,
|
16
|
+
)
|
17
|
+
from kailash.nodes.base import NodeParameter, register_node
|
18
|
+
from kailash.nodes.base_async import AsyncNode
|
19
|
+
|
20
|
+
|
21
|
+
@register_node()
class EdgeWarmingNode(AsyncNode):
    """Node for predictive edge warming operations.

    This node provides predictive warming capabilities to anticipate
    and prepare edge nodes before they're needed, reducing cold start latency.

    Supported operations (selected via the ``operation`` input):
        - ``record_usage``: record one usage pattern for later analysis
        - ``predict``: compute warming predictions from recorded patterns
        - ``warm_nodes``: warm predicted nodes or an explicit node list
        - ``evaluate``: report whether a predicted node was actually used
        - ``get_metrics``: return prediction accuracy metrics
        - ``start_auto`` / ``stop_auto``: toggle background auto-warming

    Example:
        >>> # Record usage pattern
        >>> result = await warming_node.execute_async(
        ...     operation="record_usage",
        ...     edge_node="edge-west-1",
        ...     user_id="user123",
        ...     location=(37.7749, -122.4194),
        ...     workload_type="ml_inference",
        ...     response_time=0.250,
        ...     resource_usage={"cpu": 0.3, "memory": 512}
        ... )

        >>> # Get warming predictions
        >>> result = await warming_node.execute_async(
        ...     operation="predict",
        ...     strategy="hybrid",
        ...     max_nodes=5
        ... )

        >>> # Execute warming
        >>> result = await warming_node.execute_async(
        ...     operation="warm_nodes",
        ...     auto_execute=True
        ... )
    """

    def __init__(self, **kwargs):
        """Initialize edge warming node.

        Args:
            **kwargs: Node configuration. Recognized optional keys:
                history_window (int): Seconds of usage history to analyze.
                    Defaults to 7 days.
                prediction_horizon (int): How far ahead to predict, in
                    seconds. Defaults to 300 (5 minutes).
                confidence_threshold (float): Minimum confidence required
                    to warm a node. Defaults to 0.7.
                max_prewarmed_nodes (int): Maximum nodes kept warm at
                    once. Defaults to 10.
        """
        super().__init__(**kwargs)

        # Extract configuration; defaults mirror the NodeParameter defaults
        # declared in input_parameters below.
        history_window = kwargs.get("history_window", 7 * 24 * 60 * 60)  # 7 days
        prediction_horizon = kwargs.get("prediction_horizon", 300)
        confidence_threshold = kwargs.get("confidence_threshold", 0.7)
        max_prewarmed_nodes = kwargs.get("max_prewarmed_nodes", 10)

        # Initialize predictive warmer
        self.warmer = PredictiveWarmer(
            history_window=history_window,
            prediction_horizon=prediction_horizon,
            confidence_threshold=confidence_threshold,
            max_prewarmed_nodes=max_prewarmed_nodes,
        )

        # True while the warmer's background prediction loop is running.
        # Fix: previously this held the warmer's private ``_prediction_task``
        # attribute; a plain flag avoids coupling to PredictiveWarmer internals
        # while preserving the reported ``auto_warming_active`` outputs.
        self._auto_warming_active = False

    @property
    def input_parameters(self) -> Dict[str, NodeParameter]:
        """Define input parameters accepted by :meth:`async_run`."""
        return {
            "operation": NodeParameter(
                name="operation",
                type=str,
                required=True,
                description="Operation to perform (record_usage, predict, warm_nodes, evaluate, get_metrics, start_auto, stop_auto)",
            ),
            # For record_usage
            "edge_node": NodeParameter(
                name="edge_node",
                type=str,
                required=False,
                description="Edge node identifier",
            ),
            "user_id": NodeParameter(
                name="user_id",
                type=str,
                required=False,
                description="User identifier for pattern analysis",
            ),
            "location": NodeParameter(
                name="location",
                type=tuple,
                required=False,
                description="Geographic location (latitude, longitude)",
            ),
            "workload_type": NodeParameter(
                name="workload_type",
                type=str,
                required=False,
                default="general",
                description="Type of workload",
            ),
            "response_time": NodeParameter(
                name="response_time",
                type=float,
                required=False,
                default=0.0,
                description="Response time in seconds",
            ),
            "resource_usage": NodeParameter(
                name="resource_usage",
                type=dict,
                required=False,
                default={},
                description="Resource usage metrics",
            ),
            # For predict
            "strategy": NodeParameter(
                name="strategy",
                type=str,
                required=False,
                default="hybrid",
                description="Prediction strategy (time_series, geographic, user_behavior, workload, hybrid)",
            ),
            "max_nodes": NodeParameter(
                name="max_nodes",
                type=int,
                required=False,
                description="Maximum nodes to warm",
            ),
            # For warm_nodes
            "auto_execute": NodeParameter(
                name="auto_execute",
                type=bool,
                required=False,
                default=False,
                description="Automatically execute warming decisions",
            ),
            "nodes_to_warm": NodeParameter(
                name="nodes_to_warm",
                type=list,
                required=False,
                description="Specific nodes to warm",
            ),
            # For evaluate
            "was_used": NodeParameter(
                name="was_used",
                type=bool,
                required=False,
                description="Whether the predicted node was actually used",
            ),
            # Configuration
            "history_window": NodeParameter(
                name="history_window",
                type=int,
                required=False,
                default=7 * 24 * 60 * 60,
                description="Time window for historical analysis (seconds)",
            ),
            "prediction_horizon": NodeParameter(
                name="prediction_horizon",
                type=int,
                required=False,
                default=300,
                description="How far ahead to predict (seconds)",
            ),
            "confidence_threshold": NodeParameter(
                name="confidence_threshold",
                type=float,
                required=False,
                default=0.7,
                description="Minimum confidence for warming",
            ),
            "max_prewarmed_nodes": NodeParameter(
                name="max_prewarmed_nodes",
                type=int,
                required=False,
                default=10,
                description="Maximum nodes to keep warm",
            ),
        }

    @property
    def output_parameters(self) -> Dict[str, NodeParameter]:
        """Define output parameters produced by :meth:`async_run`."""
        return {
            "status": NodeParameter(
                name="status", type=str, description="Operation status"
            ),
            "predictions": NodeParameter(
                name="predictions",
                type=list,
                required=False,
                description="List of warming predictions",
            ),
            "warmed_nodes": NodeParameter(
                name="warmed_nodes",
                type=list,
                required=False,
                description="List of warmed edge nodes",
            ),
            "metrics": NodeParameter(
                name="metrics",
                type=dict,
                required=False,
                description="Prediction metrics",
            ),
            "pattern_recorded": NodeParameter(
                name="pattern_recorded",
                type=bool,
                required=False,
                description="Whether usage pattern was recorded",
            ),
            "auto_warming_active": NodeParameter(
                name="auto_warming_active",
                type=bool,
                required=False,
                description="Whether automatic warming is active",
            ),
        }

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Get all node parameters for compatibility.

        Returns only the input parameters; the runtime uses these for
        request validation.
        """
        return self.input_parameters

    async def async_run(self, **kwargs) -> Dict[str, Any]:
        """Execute edge warming operation.

        Dispatches on the required ``operation`` keyword. Any exception
        raised by a handler is converted to an error-status result dict
        rather than propagating.

        Returns:
            Result dict with at least a ``status`` key; on failure,
            ``{"status": "error", "error": <message>}``.
        """
        operation = kwargs["operation"]

        try:
            if operation == "record_usage":
                return await self._record_usage(kwargs)
            elif operation == "predict":
                return await self._predict_warming(kwargs)
            elif operation == "warm_nodes":
                return await self._warm_nodes(kwargs)
            elif operation == "evaluate":
                return await self._evaluate_prediction(kwargs)
            elif operation == "get_metrics":
                return await self._get_metrics()
            elif operation == "start_auto":
                return await self._start_auto_warming()
            elif operation == "stop_auto":
                return await self._stop_auto_warming()
            else:
                raise ValueError(f"Unknown operation: {operation}")

        except Exception as e:
            self.logger.error(f"Edge warming operation failed: {str(e)}")
            return {"status": "error", "error": str(e)}

    async def _record_usage(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Record a usage pattern with the predictive warmer."""
        # Create usage pattern; the timestamp is taken at call time.
        pattern = UsagePattern(
            timestamp=datetime.now(),
            edge_node=kwargs.get("edge_node", "unknown"),
            user_id=kwargs.get("user_id"),
            location=kwargs.get("location"),
            workload_type=kwargs.get("workload_type", "general"),
            response_time=kwargs.get("response_time", 0.0),
            resource_usage=kwargs.get("resource_usage", {}),
        )

        await self.warmer.record_usage(pattern)

        return {
            "status": "success",
            "pattern_recorded": True,
            "patterns_total": len(self.warmer.usage_history),
        }

    async def _predict_warming(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Make warming predictions.

        Unknown strategy names degrade gracefully to the hybrid strategy.
        Honors the optional ``max_nodes`` input as an upper bound on the
        number of predictions returned.
        """
        strategy_name = kwargs.get("strategy", "hybrid")
        try:
            strategy = PredictionStrategy(strategy_name)
        except ValueError:
            # Invalid strategy name: fall back rather than fail the workflow.
            strategy = PredictionStrategy.HYBRID

        decisions = await self.warmer.predict_warming_needs(strategy)

        # Fix: the declared ``max_nodes`` input parameter was previously
        # accepted but silently ignored; apply it as a limit here.
        max_nodes = kwargs.get("max_nodes")
        if max_nodes is not None:
            decisions = decisions[:max_nodes]

        predictions = [
            {
                "edge_node": decision.edge_node,
                "confidence": decision.confidence,
                "predicted_time": decision.predicted_time.isoformat(),
                "resources_needed": decision.resources_needed,
                "strategy": decision.strategy_used.value,
                "reasoning": decision.reasoning,
            }
            for decision in decisions
        ]

        return {
            "status": "success",
            "predictions": predictions,
            "prediction_count": len(predictions),
        }

    async def _warm_nodes(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Execute node warming.

        With ``auto_execute`` true, warms every node the warmer currently
        predicts; otherwise warms the explicit ``nodes_to_warm`` list with
        a minimal default resource allocation. If neither is given, no
        nodes are warmed and an empty result is returned.
        """
        warmed_nodes = []

        if kwargs.get("auto_execute", False):
            # Use fresh predictions (default strategy) to choose nodes.
            decisions = await self.warmer.predict_warming_needs()

            for decision in decisions:
                await self._warm_single_node(
                    decision.edge_node, decision.resources_needed
                )
                warmed_nodes.append(decision.edge_node)

        elif kwargs.get("nodes_to_warm"):
            # Warm specific nodes with a small default resource footprint.
            for node in kwargs["nodes_to_warm"]:
                await self._warm_single_node(node, {"cpu": 0.1, "memory": 128})
                warmed_nodes.append(node)

        return {
            "status": "success",
            "warmed_nodes": warmed_nodes,
            "warmed_count": len(warmed_nodes),
        }

    async def _warm_single_node(self, edge_node: str, resources: Dict[str, float]):
        """Warm a single edge node.

        Args:
            edge_node: Identifier of the node to warm.
            resources: Resource allocation to pre-provision (e.g. cpu, memory).
        """
        # TODO: Implement actual edge warming
        # This would involve:
        # 1. Connecting to edge infrastructure
        # 2. Pre-allocating resources
        # 3. Loading necessary data/models
        # 4. Running health checks

        # For now, simulate warming
        self.logger.info(f"Warming edge node {edge_node} with resources {resources}")
        await asyncio.sleep(0.1)  # Simulate warming time

    async def _evaluate_prediction(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Report back to the warmer whether a predicted node was used."""
        edge_node = kwargs.get("edge_node", "unknown")
        was_used = kwargs.get("was_used", False)

        self.warmer.evaluate_prediction(edge_node, was_used)

        return {
            "status": "success",
            "edge_node": edge_node,
            "was_used": was_used,
            "evaluation_recorded": True,
        }

    async def _get_metrics(self) -> Dict[str, Any]:
        """Get prediction metrics from the warmer."""
        metrics = self.warmer.get_metrics()

        return {"status": "success", "metrics": metrics}

    async def _start_auto_warming(self) -> Dict[str, Any]:
        """Start automatic warming (idempotent)."""
        if not self._auto_warming_active:
            await self.warmer.start()
            self._auto_warming_active = True

        return {"status": "success", "auto_warming_active": True}

    async def _stop_auto_warming(self) -> Dict[str, Any]:
        """Stop automatic warming (idempotent)."""
        if self._auto_warming_active:
            await self.warmer.stop()
            self._auto_warming_active = False

        return {"status": "success", "auto_warming_active": False}

    async def cleanup(self):
        """Clean up resources, stopping the background warming loop if active."""
        if self._auto_warming_active:
            await self.warmer.stop()
            self._auto_warming_active = False
|