kailash 0.8.3__py3-none-any.whl → 0.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -7
- kailash/cli/__init__.py +11 -1
- kailash/cli/validation_audit.py +570 -0
- kailash/core/actors/supervisor.py +1 -1
- kailash/core/resilience/circuit_breaker.py +71 -1
- kailash/core/resilience/health_monitor.py +172 -0
- kailash/edge/compliance.py +33 -0
- kailash/edge/consistency.py +609 -0
- kailash/edge/coordination/__init__.py +30 -0
- kailash/edge/coordination/global_ordering.py +355 -0
- kailash/edge/coordination/leader_election.py +217 -0
- kailash/edge/coordination/partition_detector.py +296 -0
- kailash/edge/coordination/raft.py +485 -0
- kailash/edge/discovery.py +63 -1
- kailash/edge/migration/__init__.py +19 -0
- kailash/edge/migration/edge_migrator.py +832 -0
- kailash/edge/monitoring/__init__.py +21 -0
- kailash/edge/monitoring/edge_monitor.py +736 -0
- kailash/edge/prediction/__init__.py +10 -0
- kailash/edge/prediction/predictive_warmer.py +591 -0
- kailash/edge/resource/__init__.py +102 -0
- kailash/edge/resource/cloud_integration.py +796 -0
- kailash/edge/resource/cost_optimizer.py +949 -0
- kailash/edge/resource/docker_integration.py +919 -0
- kailash/edge/resource/kubernetes_integration.py +893 -0
- kailash/edge/resource/platform_integration.py +913 -0
- kailash/edge/resource/predictive_scaler.py +959 -0
- kailash/edge/resource/resource_analyzer.py +824 -0
- kailash/edge/resource/resource_pools.py +610 -0
- kailash/integrations/dataflow_edge.py +261 -0
- kailash/mcp_server/registry_integration.py +1 -1
- kailash/monitoring/__init__.py +18 -0
- kailash/monitoring/alerts.py +646 -0
- kailash/monitoring/metrics.py +677 -0
- kailash/nodes/__init__.py +2 -0
- kailash/nodes/ai/__init__.py +17 -0
- kailash/nodes/ai/a2a.py +1914 -43
- kailash/nodes/ai/a2a_backup.py +1807 -0
- kailash/nodes/ai/hybrid_search.py +972 -0
- kailash/nodes/ai/semantic_memory.py +558 -0
- kailash/nodes/ai/streaming_analytics.py +947 -0
- kailash/nodes/base.py +545 -0
- kailash/nodes/edge/__init__.py +36 -0
- kailash/nodes/edge/base.py +240 -0
- kailash/nodes/edge/cloud_node.py +710 -0
- kailash/nodes/edge/coordination.py +239 -0
- kailash/nodes/edge/docker_node.py +825 -0
- kailash/nodes/edge/edge_data.py +582 -0
- kailash/nodes/edge/edge_migration_node.py +392 -0
- kailash/nodes/edge/edge_monitoring_node.py +421 -0
- kailash/nodes/edge/edge_state.py +673 -0
- kailash/nodes/edge/edge_warming_node.py +393 -0
- kailash/nodes/edge/kubernetes_node.py +652 -0
- kailash/nodes/edge/platform_node.py +766 -0
- kailash/nodes/edge/resource_analyzer_node.py +378 -0
- kailash/nodes/edge/resource_optimizer_node.py +501 -0
- kailash/nodes/edge/resource_scaler_node.py +397 -0
- kailash/nodes/ports.py +676 -0
- kailash/runtime/local.py +344 -1
- kailash/runtime/validation/__init__.py +20 -0
- kailash/runtime/validation/connection_context.py +119 -0
- kailash/runtime/validation/enhanced_error_formatter.py +202 -0
- kailash/runtime/validation/error_categorizer.py +164 -0
- kailash/runtime/validation/metrics.py +380 -0
- kailash/runtime/validation/performance.py +615 -0
- kailash/runtime/validation/suggestion_engine.py +212 -0
- kailash/testing/fixtures.py +2 -2
- kailash/workflow/builder.py +234 -8
- kailash/workflow/contracts.py +418 -0
- kailash/workflow/edge_infrastructure.py +369 -0
- kailash/workflow/migration.py +3 -3
- kailash/workflow/type_inference.py +669 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/METADATA +44 -27
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/RECORD +78 -28
- kailash/nexus/__init__.py +0 -21
- kailash/nexus/cli/__init__.py +0 -5
- kailash/nexus/cli/__main__.py +0 -6
- kailash/nexus/cli/main.py +0 -176
- kailash/nexus/factory.py +0 -413
- kailash/nexus/gateway.py +0 -545
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/WHEEL +0 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/entry_points.txt +0 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.8.3.dist-info → kailash-0.8.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,378 @@
|
|
1
|
+
"""Resource analyzer node for intelligent resource management.
|
2
|
+
|
3
|
+
This node integrates resource analysis capabilities into workflows,
|
4
|
+
providing insights into resource usage patterns and bottlenecks.
|
5
|
+
"""
|
6
|
+
|
7
|
+
import asyncio
|
8
|
+
from datetime import datetime, timedelta
|
9
|
+
from typing import Any, Dict, List, Optional
|
10
|
+
|
11
|
+
from kailash.edge.resource.resource_analyzer import (
|
12
|
+
Bottleneck,
|
13
|
+
ResourceAnalyzer,
|
14
|
+
ResourceMetric,
|
15
|
+
ResourcePattern,
|
16
|
+
ResourceType,
|
17
|
+
)
|
18
|
+
from kailash.nodes.base import NodeParameter, register_node
|
19
|
+
from kailash.nodes.base_async import AsyncNode
|
20
|
+
|
21
|
+
|
22
|
+
@register_node()
class ResourceAnalyzerNode(AsyncNode):
    """Node for resource analysis and optimization operations.

    This node provides comprehensive resource analysis capabilities including
    pattern identification, bottleneck detection, and optimization recommendations.

    Example:
        >>> # Record resource metric
        >>> result = await analyzer_node.execute_async(
        ...     operation="record_metric",
        ...     edge_node="edge-west-1",
        ...     resource_type="cpu",
        ...     used=2.5,
        ...     total=4.0
        ... )

        >>> # Analyze resources
        >>> result = await analyzer_node.execute_async(
        ...     operation="analyze",
        ...     include_patterns=True,
        ...     include_bottlenecks=True
        ... )

        >>> # Get trends
        >>> result = await analyzer_node.execute_async(
        ...     operation="get_trends",
        ...     edge_node="edge-west-1",
        ...     duration_minutes=60
        ... )

        >>> # Get recommendations
        >>> result = await analyzer_node.execute_async(
        ...     operation="get_recommendations"
        ... )
    """

    def __init__(self, **kwargs):
        """Initialize resource analyzer node.

        Args:
            **kwargs: Node configuration. Recognized keys (all optional):
                history_window (int): Time window for analysis in seconds
                    (default 3600).
                analysis_interval (int): Interval between analyses in seconds
                    (default 60).
                anomaly_threshold (float): Threshold for anomaly detection in
                    standard deviations (default 2.5).
                pattern_confidence_threshold (float): Minimum confidence for
                    patterns (default 0.7).
        """
        super().__init__(**kwargs)

        # Build the underlying analyzer from configuration; the fallbacks here
        # mirror the defaults declared in input_parameters.
        self.analyzer = ResourceAnalyzer(
            history_window=kwargs.get("history_window", 3600),
            analysis_interval=kwargs.get("analysis_interval", 60),
            anomaly_threshold=kwargs.get("anomaly_threshold", 2.5),
            pattern_confidence_threshold=kwargs.get(
                "pattern_confidence_threshold", 0.7
            ),
        )

        # Tracks whether the background analysis loop is running, so that
        # start/stop/cleanup are idempotent.
        self._analyzer_started = False

    @property
    def input_parameters(self) -> Dict[str, NodeParameter]:
        """Define input parameters."""
        return {
            "operation": NodeParameter(
                name="operation",
                type=str,
                required=True,
                description="Operation to perform (record_metric, analyze, get_trends, get_recommendations, start_analyzer, stop_analyzer)",
            ),
            # For record_metric
            "edge_node": NodeParameter(
                name="edge_node",
                type=str,
                required=False,
                description="Edge node identifier",
            ),
            "resource_type": NodeParameter(
                name="resource_type",
                type=str,
                required=False,
                description="Type of resource (cpu, memory, gpu, storage, network)",
            ),
            "used": NodeParameter(
                name="used",
                type=float,
                required=False,
                description="Amount of resource used",
            ),
            "available": NodeParameter(
                name="available",
                type=float,
                required=False,
                description="Amount of resource available",
            ),
            "total": NodeParameter(
                name="total",
                type=float,
                required=False,
                description="Total resource capacity",
            ),
            "metadata": NodeParameter(
                name="metadata",
                type=dict,
                required=False,
                default={},
                description="Additional metric metadata",
            ),
            # For analyze
            "include_patterns": NodeParameter(
                name="include_patterns",
                type=bool,
                required=False,
                default=True,
                description="Include pattern analysis",
            ),
            "include_bottlenecks": NodeParameter(
                name="include_bottlenecks",
                type=bool,
                required=False,
                default=True,
                description="Include bottleneck detection",
            ),
            "include_anomalies": NodeParameter(
                name="include_anomalies",
                type=bool,
                required=False,
                default=True,
                description="Include anomaly detection",
            ),
            # For get_trends
            "duration_minutes": NodeParameter(
                name="duration_minutes",
                type=int,
                required=False,
                default=60,
                description="Duration for trend analysis",
            ),
            # Configuration
            "history_window": NodeParameter(
                name="history_window",
                type=int,
                required=False,
                default=3600,
                description="Time window for analysis (seconds)",
            ),
            "analysis_interval": NodeParameter(
                name="analysis_interval",
                type=int,
                required=False,
                default=60,
                description="Interval between analyses (seconds)",
            ),
            "anomaly_threshold": NodeParameter(
                name="anomaly_threshold",
                type=float,
                required=False,
                default=2.5,
                description="Threshold for anomaly detection (std devs)",
            ),
            "pattern_confidence_threshold": NodeParameter(
                name="pattern_confidence_threshold",
                type=float,
                required=False,
                default=0.7,
                description="Minimum confidence for patterns",
            ),
        }

    @property
    def output_parameters(self) -> Dict[str, NodeParameter]:
        """Define output parameters."""
        return {
            "status": NodeParameter(
                name="status", type=str, description="Operation status"
            ),
            "patterns": NodeParameter(
                name="patterns",
                type=list,
                required=False,
                description="Identified resource patterns",
            ),
            "bottlenecks": NodeParameter(
                name="bottlenecks",
                type=list,
                required=False,
                description="Detected bottlenecks",
            ),
            "anomalies": NodeParameter(
                name="anomalies",
                type=list,
                required=False,
                description="Detected anomalies",
            ),
            "trends": NodeParameter(
                name="trends",
                type=dict,
                required=False,
                description="Resource usage trends",
            ),
            "recommendations": NodeParameter(
                name="recommendations",
                type=list,
                required=False,
                description="Optimization recommendations",
            ),
            "analysis_summary": NodeParameter(
                name="analysis_summary",
                type=dict,
                required=False,
                description="Analysis summary",
            ),
            "metric_recorded": NodeParameter(
                name="metric_recorded",
                type=bool,
                required=False,
                description="Whether metric was recorded",
            ),
            "analyzer_active": NodeParameter(
                name="analyzer_active",
                type=bool,
                required=False,
                description="Whether analyzer is active",
            ),
        }

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Get all node parameters for compatibility."""
        return self.input_parameters

    async def async_run(self, **kwargs) -> Dict[str, Any]:
        """Execute resource analysis operation.

        Args:
            **kwargs: Must include ``operation``; remaining keys depend on the
                operation (see :attr:`input_parameters`).

        Returns:
            Result dictionary containing at least a ``status`` key. On failure
            the dict is ``{"status": "error", "error": <message>}`` rather
            than raising, so workflow execution can continue.
        """
        operation = kwargs["operation"]

        # Dispatch table keeps this method a thin router over the private
        # operation handlers.
        handlers = {
            "record_metric": lambda: self._record_metric(kwargs),
            "analyze": lambda: self._analyze_resources(kwargs),
            "get_trends": lambda: self._get_trends(kwargs),
            "get_recommendations": self._get_recommendations,
            "start_analyzer": self._start_analyzer,
            "stop_analyzer": self._stop_analyzer,
        }

        try:
            handler = handlers.get(operation)
            if handler is None:
                raise ValueError(f"Unknown operation: {operation}")
            return await handler()
        except Exception as e:
            # Broad catch is deliberate: node operations report errors as data
            # instead of propagating into the runtime.
            self.logger.error(f"Resource analysis operation failed: {str(e)}")
            return {"status": "error", "error": str(e)}

    async def _record_metric(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Record a resource metric.

        Unknown ``resource_type`` strings fall back to ``ResourceType.CUSTOM``
        instead of failing the operation.
        """
        resource_type_str = kwargs.get("resource_type", "cpu")
        try:
            resource_type = ResourceType(resource_type_str)
        except ValueError:
            resource_type = ResourceType.CUSTOM

        used = kwargs.get("used", 0.0)
        total = kwargs.get("total", 0.0)

        # Derive "available" only when the caller omitted it; clamp at zero so
        # an over-committed resource (used > total) never reports a negative
        # availability.
        available = kwargs.get("available")
        if available is None:
            available = max(total - used, 0.0)

        metric = ResourceMetric(
            timestamp=datetime.now(),
            edge_node=kwargs.get("edge_node", "unknown"),
            resource_type=resource_type,
            used=used,
            available=available,
            total=total,
            metadata=kwargs.get("metadata", {}),
        )

        await self.analyzer.record_metric(metric)

        return {
            "status": "success",
            "metric_recorded": True,
            "metric": metric.to_dict(),
        }

    async def _analyze_resources(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Perform resource analysis, filtering sections per the request flags."""
        analysis = await self.analyzer.analyze_resources()

        result = {"status": "success"}

        # Each section is opt-out; defaults match the declared parameters.
        if kwargs.get("include_patterns", True):
            result["patterns"] = analysis["patterns"]

        if kwargs.get("include_bottlenecks", True):
            result["bottlenecks"] = analysis["bottlenecks"]

        if kwargs.get("include_anomalies", True):
            result["anomalies"] = analysis["anomalies"]

        # The summary is always included regardless of the section flags.
        result["analysis_summary"] = analysis["summary"]

        return result

    async def _get_trends(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Get resource usage trends, optionally filtered by node and type."""
        edge_node = kwargs.get("edge_node")
        duration_minutes = kwargs.get("duration_minutes", 60)

        # An unrecognized resource-type string is treated as "no filter"
        # rather than an error.
        resource_type = None
        resource_type_str = kwargs.get("resource_type")
        if resource_type_str:
            try:
                resource_type = ResourceType(resource_type_str)
            except ValueError:
                pass

        trends = await self.analyzer.get_resource_trends(
            edge_node=edge_node,
            resource_type=resource_type,
            duration_minutes=duration_minutes,
        )

        return {"status": "success", "trends": trends, "trend_count": len(trends)}

    async def _get_recommendations(self) -> Dict[str, Any]:
        """Get optimization recommendations from the analyzer."""
        recommendations = await self.analyzer.get_optimization_recommendations()

        return {
            "status": "success",
            "recommendations": recommendations,
            "recommendation_count": len(recommendations),
        }

    async def _start_analyzer(self) -> Dict[str, Any]:
        """Start the background analyzer (idempotent)."""
        if not self._analyzer_started:
            await self.analyzer.start()
            self._analyzer_started = True

        return {"status": "success", "analyzer_active": True}

    async def _stop_analyzer(self) -> Dict[str, Any]:
        """Stop the background analyzer (idempotent)."""
        if self._analyzer_started:
            await self.analyzer.stop()
            self._analyzer_started = False

        return {"status": "success", "analyzer_active": False}

    async def cleanup(self):
        """Clean up resources, stopping the analyzer if it is still running."""
        if self._analyzer_started:
            await self.analyzer.stop()
            # Reset the flag so cleanup stays consistent with _stop_analyzer
            # and a repeated cleanup does not stop the analyzer twice.
            self._analyzer_started = False
|