kailash 0.4.2__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/client/__init__.py +12 -0
- kailash/client/enhanced_client.py +306 -0
- kailash/core/actors/__init__.py +16 -0
- kailash/core/actors/connection_actor.py +566 -0
- kailash/core/actors/supervisor.py +364 -0
- kailash/edge/__init__.py +16 -0
- kailash/edge/compliance.py +834 -0
- kailash/edge/discovery.py +659 -0
- kailash/edge/location.py +582 -0
- kailash/gateway/__init__.py +33 -0
- kailash/gateway/api.py +289 -0
- kailash/gateway/enhanced_gateway.py +357 -0
- kailash/gateway/resource_resolver.py +217 -0
- kailash/gateway/security.py +227 -0
- kailash/middleware/auth/models.py +2 -2
- kailash/middleware/database/base_models.py +1 -7
- kailash/middleware/database/repositories.py +3 -1
- kailash/middleware/gateway/__init__.py +22 -0
- kailash/middleware/gateway/checkpoint_manager.py +398 -0
- kailash/middleware/gateway/deduplicator.py +382 -0
- kailash/middleware/gateway/durable_gateway.py +417 -0
- kailash/middleware/gateway/durable_request.py +498 -0
- kailash/middleware/gateway/event_store.py +459 -0
- kailash/nodes/admin/audit_log.py +364 -6
- kailash/nodes/admin/permission_check.py +817 -33
- kailash/nodes/admin/role_management.py +1242 -108
- kailash/nodes/admin/schema_manager.py +438 -0
- kailash/nodes/admin/user_management.py +1209 -681
- kailash/nodes/api/http.py +95 -71
- kailash/nodes/base.py +281 -164
- kailash/nodes/base_async.py +30 -31
- kailash/nodes/code/__init__.py +8 -1
- kailash/nodes/code/async_python.py +1035 -0
- kailash/nodes/code/python.py +1 -0
- kailash/nodes/data/async_sql.py +12 -25
- kailash/nodes/data/sql.py +20 -11
- kailash/nodes/data/workflow_connection_pool.py +643 -0
- kailash/nodes/rag/__init__.py +1 -4
- kailash/resources/__init__.py +40 -0
- kailash/resources/factory.py +533 -0
- kailash/resources/health.py +319 -0
- kailash/resources/reference.py +288 -0
- kailash/resources/registry.py +392 -0
- kailash/runtime/async_local.py +711 -302
- kailash/testing/__init__.py +34 -0
- kailash/testing/async_test_case.py +353 -0
- kailash/testing/async_utils.py +345 -0
- kailash/testing/fixtures.py +458 -0
- kailash/testing/mock_registry.py +495 -0
- kailash/utils/resource_manager.py +420 -0
- kailash/workflow/__init__.py +8 -0
- kailash/workflow/async_builder.py +621 -0
- kailash/workflow/async_patterns.py +766 -0
- kailash/workflow/builder.py +93 -10
- kailash/workflow/cyclic_runner.py +111 -41
- kailash/workflow/graph.py +7 -2
- kailash/workflow/resilience.py +11 -1
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/METADATA +12 -7
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/RECORD +64 -28
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/WHEEL +0 -0
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/entry_points.txt +0 -0
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/top_level.txt +0 -0
kailash/edge/location.py
ADDED
@@ -0,0 +1,582 @@
|
|
1
|
+
"""Edge location management for global compute distribution."""
|
2
|
+
|
3
|
+
import asyncio
import json
import logging
import time
import uuid
from dataclasses import dataclass, field
from datetime import UTC, datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Optional, Set, Union
|
12
|
+
|
13
|
+
logger = logging.getLogger(__name__)
|
14
|
+
|
15
|
+
|
16
|
+
class EdgeRegion(Enum):
    """Standard geographic regions for edge deployment.

    Values are the canonical slug strings used in serialized payloads
    (see ``EdgeLocation.to_dict`` / ``EdgeLocation.from_dict``).
    """

    # North America
    US_EAST = "us-east"
    US_WEST = "us-west"
    US_CENTRAL = "us-central"
    CANADA = "canada"

    # Europe
    EU_WEST = "eu-west"
    EU_CENTRAL = "eu-central"
    EU_NORTH = "eu-north"
    UK = "uk"

    # Asia Pacific
    ASIA_SOUTHEAST = "asia-southeast"
    ASIA_EAST = "asia-east"
    ASIA_SOUTH = "asia-south"
    JAPAN = "japan"
    AUSTRALIA = "australia"

    # Other regions
    SOUTH_AMERICA = "south-america"
    AFRICA = "africa"
    MIDDLE_EAST = "middle-east"
|
42
|
+
|
43
|
+
|
44
|
+
class EdgeStatus(Enum):
    """Edge location operational status.

    ACTIVE and DEGRADED locations still accept new workloads (see
    ``EdgeLocation.is_available_for_workload``); DEGRADED is set/cleared
    automatically from health-check failures and metric thresholds.
    """

    ACTIVE = "active"
    DEGRADED = "degraded"  # auto-applied when error rate / utilization spikes
    MAINTENANCE = "maintenance"
    OFFLINE = "offline"  # always treated as unhealthy
    DRAINING = "draining"  # Stopping new workloads
|
52
|
+
|
53
|
+
|
54
|
+
class ComplianceZone(Enum):
    """Data compliance and sovereignty zones.

    A location may advertise several zones at once; workload placement
    requires ALL requested zones to be present
    (see ``EdgeLocation.supports_compliance``).
    """

    # Data sovereignty regions
    GDPR = "gdpr"  # EU/EEA
    CCPA = "ccpa"  # California
    PIPEDA = "pipeda"  # Canada
    LGPD = "lgpd"  # Brazil

    # Industry compliance
    HIPAA = "hipaa"  # Healthcare (US)
    SOX = "sox"  # Financial (US)
    PCI_DSS = "pci_dss"  # Payment cards

    # Government/security
    FEDRAMP = "fedramp"  # US Government
    ITAR = "itar"  # Export control (US)

    # General zones
    PUBLIC = "public"  # No restrictions
    RESTRICTED = "restricted"  # Custom restrictions
|
75
|
+
|
76
|
+
|
77
|
+
@dataclass
class GeographicCoordinates:
    """Geographic coordinates (decimal degrees) for an edge location."""

    latitude: float
    longitude: float

    def distance_to(self, other: "GeographicCoordinates") -> float:
        """Calculate distance to another location in kilometers using Haversine formula."""
        import math

        # Work in radians throughout.
        phi1 = math.radians(self.latitude)
        phi2 = math.radians(other.latitude)
        lam1 = math.radians(self.longitude)
        lam2 = math.radians(other.longitude)

        # Haversine formula: half-angle sines of the latitude/longitude deltas.
        half_dphi = (phi2 - phi1) / 2.0
        half_dlam = (lam2 - lam1) / 2.0
        chord = (
            math.sin(half_dphi) ** 2
            + math.cos(phi1) * math.cos(phi2) * math.sin(half_dlam) ** 2
        )

        # Central angle scaled by the mean Earth radius (km).
        EARTH_RADIUS_KM = 6371
        return EARTH_RADIUS_KM * 2 * math.asin(math.sqrt(chord))
|
104
|
+
|
105
|
+
|
106
|
+
@dataclass
class EdgeCapabilities:
    """Resource capabilities available at an edge location.

    List-valued fields use ``default_factory`` so each instance gets its
    own list (the previous ``= None`` defaults mis-declared the types and
    forced a None-check dance). ``__post_init__`` still normalizes an
    explicit ``None`` for backward compatibility with existing callers.
    """

    # Compute resources
    cpu_cores: int
    memory_gb: float
    storage_gb: float
    gpu_available: bool = False
    gpu_type: Optional[str] = None

    # Network capabilities
    bandwidth_gbps: float = 1.0
    supports_ipv6: bool = True
    cdn_enabled: bool = True

    # Service capabilities
    database_support: Optional[List[str]] = field(
        default_factory=lambda: ["postgresql", "redis"]
    )  # e.g. ["postgresql", "mongodb", "redis"]
    ai_models_available: Optional[List[str]] = field(
        default_factory=list
    )  # e.g. ["llama", "gpt", "claude"]
    container_runtime: str = "docker"

    # Compliance and security
    encryption_at_rest: bool = True
    encryption_in_transit: bool = True
    audit_logging: bool = True

    def __post_init__(self):
        # Backward compatibility: callers that pass None explicitly still
        # receive the documented defaults instead of a NoneType.
        if self.database_support is None:
            self.database_support = ["postgresql", "redis"]
        if self.ai_models_available is None:
            self.ai_models_available = []
|
137
|
+
|
138
|
+
|
139
|
+
@dataclass
|
140
|
+
class EdgeMetrics:
|
141
|
+
"""Real-time metrics for edge location performance."""
|
142
|
+
|
143
|
+
# Performance metrics
|
144
|
+
cpu_utilization: float = 0.0 # 0.0 to 1.0
|
145
|
+
memory_utilization: float = 0.0
|
146
|
+
storage_utilization: float = 0.0
|
147
|
+
|
148
|
+
# Network metrics
|
149
|
+
latency_p50_ms: float = 0.0
|
150
|
+
latency_p95_ms: float = 0.0
|
151
|
+
latency_p99_ms: float = 0.0
|
152
|
+
throughput_rps: int = 0
|
153
|
+
|
154
|
+
# Reliability metrics
|
155
|
+
uptime_percentage: float = 100.0
|
156
|
+
error_rate: float = 0.0 # 0.0 to 1.0
|
157
|
+
success_rate: float = 1.0 # 0.0 to 1.0
|
158
|
+
|
159
|
+
# Cost metrics
|
160
|
+
compute_cost_per_hour: float = 0.0
|
161
|
+
network_cost_per_gb: float = 0.0
|
162
|
+
storage_cost_per_gb_month: float = 0.0
|
163
|
+
|
164
|
+
# Timestamp
|
165
|
+
collected_at: datetime = None
|
166
|
+
|
167
|
+
def __post_init__(self):
|
168
|
+
if self.collected_at is None:
|
169
|
+
self.collected_at = datetime.now(UTC)
|
170
|
+
|
171
|
+
|
172
|
+
class EdgeLocation:
    """Represents a global edge computing location.

    Each edge location provides compute, storage, and network resources
    for low-latency data processing and compliance with regional regulations.

    Instances carry both static configuration (region, coordinates,
    capabilities, compliance zones) and mutable runtime state (status,
    metrics, active workloads, health-check counters). Status transitions
    between ACTIVE and DEGRADED happen automatically in ``health_check``
    and ``update_metrics``.
    """

    def __init__(
        self,
        location_id: str,
        name: str,
        region: EdgeRegion,
        coordinates: GeographicCoordinates,
        capabilities: EdgeCapabilities,
        compliance_zones: List[ComplianceZone] = None,
        provider: str = "kailash",
        endpoint_url: str = None,
        **metadata,
    ):
        """Initialize edge location.

        Args:
            location_id: Unique identifier for this location
            name: Human-readable name (e.g., "US-East-Virginia")
            region: Geographic region
            coordinates: Latitude/longitude coordinates
            capabilities: Available compute and network resources
            compliance_zones: Regulatory compliance zones (defaults to PUBLIC)
            provider: Cloud provider or infrastructure name
            endpoint_url: API endpoint for this location (derived from
                location_id when omitted)
            **metadata: Additional custom metadata
        """
        self.location_id = location_id
        self.name = name
        self.region = region
        self.coordinates = coordinates
        self.capabilities = capabilities
        self.compliance_zones = compliance_zones or [ComplianceZone.PUBLIC]
        self.provider = provider
        self.endpoint_url = endpoint_url or f"https://{location_id}.edge.kailash.ai"
        self.metadata = metadata

        # Runtime state
        self.status = EdgeStatus.ACTIVE
        self.metrics = EdgeMetrics()
        self.connected_users: Set[str] = set()
        self.active_workloads: Dict[str, Any] = {}
        self.health_check_failures = 0
        self.last_health_check = datetime.now(UTC)

        # Cost tracking
        self.cost_optimizer_enabled = True
        self._cost_history: List[Dict] = []

        logger.info(
            f"Initialized edge location {self.name} ({self.location_id}) in {self.region.value}"
        )

    @property
    def is_healthy(self) -> bool:
        """Check if edge location is healthy and available."""
        if self.status == EdgeStatus.OFFLINE:
            return False

        # Check if health checks are failing (more than 3 consecutive failures)
        if self.health_check_failures > 3:
            return False

        # Check if metrics indicate problems (near-saturated CPU/memory
        # or >10% error rate)
        if (
            self.metrics.cpu_utilization > 0.95
            or self.metrics.memory_utilization > 0.95
            or self.metrics.error_rate > 0.1
        ):
            return False

        return True

    @property
    def is_available_for_workload(self) -> bool:
        """Check if location can accept new workloads.

        DEGRADED locations still accept work; CPU must have headroom (<80%).
        """
        return (
            self.is_healthy
            and self.status in [EdgeStatus.ACTIVE, EdgeStatus.DEGRADED]
            and self.metrics.cpu_utilization < 0.8
        )

    def calculate_latency_to(self, user_coordinates: GeographicCoordinates) -> float:
        """Estimate network latency to user location in milliseconds.

        Uses geographic distance as a proxy for network latency.
        Assumes ~1ms per 100km plus base latency.
        """
        distance_km = self.coordinates.distance_to(user_coordinates)

        # Base latency components
        base_latency = 2.0  # Processing overhead
        network_latency = distance_km * 0.01  # ~1ms per 100km
        provider_overhead = 1.0  # CDN/routing overhead

        estimated_latency = base_latency + network_latency + provider_overhead

        # Add current performance degradation (50% penalty when DEGRADED)
        if self.status == EdgeStatus.DEGRADED:
            estimated_latency *= 1.5

        return estimated_latency

    def calculate_cost_for_workload(
        self,
        cpu_hours: float = 1.0,
        memory_gb_hours: float = 1.0,
        storage_gb: float = 0.0,
        network_gb: float = 0.0,
    ) -> float:
        """Calculate estimated cost for running a workload.

        Rates come from ``self.metrics``; the monthly storage rate is
        converted to an hourly rate. A regional multiplier is applied last
        (regions not in the table default to 1.0).
        """
        compute_cost = (
            cpu_hours * self.capabilities.cpu_cores * self.metrics.compute_cost_per_hour
        )
        storage_cost = (
            storage_gb * self.metrics.storage_cost_per_gb_month / (24 * 30)
        )  # Hourly rate
        network_cost = network_gb * self.metrics.network_cost_per_gb

        total_cost = compute_cost + storage_cost + network_cost

        # Apply regional cost multipliers
        region_multipliers = {
            EdgeRegion.US_EAST: 1.0,
            EdgeRegion.US_WEST: 1.1,
            EdgeRegion.EU_WEST: 1.2,
            EdgeRegion.ASIA_EAST: 1.3,
            EdgeRegion.JAPAN: 1.4,
        }

        multiplier = region_multipliers.get(self.region, 1.0)
        return total_cost * multiplier

    def supports_compliance(self, required_zones: List[ComplianceZone]) -> bool:
        """Check if location supports required compliance zones.

        All requested zones must be present (conjunctive check).
        """
        return all(zone in self.compliance_zones for zone in required_zones)

    def supports_capabilities(self, required_capabilities: Dict[str, Any]) -> bool:
        """Check if location supports required capabilities.

        Recognized keys: ``cpu_cores`` (min count), ``memory_gb`` (min GB),
        ``gpu_required`` (bool), ``database_support`` (list, all required),
        ``ai_models`` (list, all required). Unrecognized keys are ignored.
        """
        for capability, requirement in required_capabilities.items():
            if capability == "cpu_cores" and self.capabilities.cpu_cores < requirement:
                return False
            elif (
                capability == "memory_gb" and self.capabilities.memory_gb < requirement
            ):
                return False
            elif (
                capability == "gpu_required"
                and requirement
                and not self.capabilities.gpu_available
            ):
                return False
            elif capability == "database_support":
                if not all(
                    db in self.capabilities.database_support for db in requirement
                ):
                    return False
            elif capability == "ai_models":
                if not all(
                    model in self.capabilities.ai_models_available
                    for model in requirement
                ):
                    return False

        return True

    async def health_check(self) -> bool:
        """Perform health check on edge location.

        Returns True on success (and resets the failure counter); on
        failure increments the counter and, after more than 3 failures,
        marks the location DEGRADED.
        """
        try:
            # Simulate health check (in production, would ping actual endpoint)
            await asyncio.sleep(0.1)  # Simulate network call

            # Update metrics (in production, would fetch real metrics)
            self.metrics.collected_at = datetime.now(UTC)

            # Reset failure counter on success
            self.health_check_failures = 0
            self.last_health_check = datetime.now(UTC)

            logger.debug(f"Health check passed for {self.name}")
            return True

        except Exception as e:
            self.health_check_failures += 1
            logger.warning(f"Health check failed for {self.name}: {e}")

            # Mark as degraded if failing repeatedly
            if self.health_check_failures > 3:
                self.status = EdgeStatus.DEGRADED

            return False

    async def update_metrics(self, new_metrics: EdgeMetrics):
        """Update location metrics.

        Replaces the metrics snapshot and applies automatic status
        transitions: DEGRADED when error rate >20%, uptime <95%, or CPU
        >98%; otherwise a DEGRADED location recovers to ACTIVE.
        """
        self.metrics = new_metrics

        # Automatic status updates based on metrics
        if self.metrics.error_rate > 0.2:
            self.status = EdgeStatus.DEGRADED
        elif self.metrics.uptime_percentage < 95.0:
            self.status = EdgeStatus.DEGRADED
        elif self.metrics.cpu_utilization > 0.98:
            self.status = EdgeStatus.DEGRADED
        else:
            # Recovery to active if metrics improve
            if self.status == EdgeStatus.DEGRADED:
                self.status = EdgeStatus.ACTIVE

    def add_workload(self, workload_id: str, workload_config: Dict[str, Any]):
        """Register a new workload at this location."""
        self.active_workloads[workload_id] = {
            "config": workload_config,
            "started_at": datetime.now(UTC),
            "status": "running",
        }
        logger.info(f"Added workload {workload_id} to {self.name}")

    def remove_workload(self, workload_id: str):
        """Remove workload from this location (no-op if unknown)."""
        if workload_id in self.active_workloads:
            del self.active_workloads[workload_id]
            logger.info(f"Removed workload {workload_id} from {self.name}")

    def get_load_factor(self) -> float:
        """Calculate current load factor (0.0 to 1.0)."""
        # Weighted combination of resource utilization
        cpu_weight = 0.4
        memory_weight = 0.3
        workload_weight = 0.3

        # Workload factor based on active workloads vs capacity
        max_workloads = self.capabilities.cpu_cores * 4  # Assume 4 workloads per core
        workload_factor = len(self.active_workloads) / max_workloads

        load_factor = (
            self.metrics.cpu_utilization * cpu_weight
            + self.metrics.memory_utilization * memory_weight
            + min(workload_factor, 1.0) * workload_weight
        )

        return min(load_factor, 1.0)

    def to_dict(self) -> Dict[str, Any]:
        """Convert edge location to dictionary representation.

        Produces a JSON-serializable snapshot; the inverse of
        ``from_dict`` for the configuration fields (runtime metrics and
        derived flags are included but not round-tripped).
        """
        return {
            "location_id": self.location_id,
            "name": self.name,
            "region": self.region.value,
            "coordinates": {
                "latitude": self.coordinates.latitude,
                "longitude": self.coordinates.longitude,
            },
            "capabilities": {
                "cpu_cores": self.capabilities.cpu_cores,
                "memory_gb": self.capabilities.memory_gb,
                "storage_gb": self.capabilities.storage_gb,
                "gpu_available": self.capabilities.gpu_available,
                "gpu_type": self.capabilities.gpu_type,
                "bandwidth_gbps": self.capabilities.bandwidth_gbps,
                "database_support": self.capabilities.database_support,
                "ai_models_available": self.capabilities.ai_models_available,
            },
            "compliance_zones": [zone.value for zone in self.compliance_zones],
            "status": self.status.value,
            "provider": self.provider,
            "endpoint_url": self.endpoint_url,
            "metrics": {
                "cpu_utilization": self.metrics.cpu_utilization,
                "memory_utilization": self.metrics.memory_utilization,
                "latency_p95_ms": self.metrics.latency_p95_ms,
                "uptime_percentage": self.metrics.uptime_percentage,
                "error_rate": self.metrics.error_rate,
                "collected_at": (
                    self.metrics.collected_at.isoformat()
                    if self.metrics.collected_at
                    else None
                ),
            },
            "is_healthy": self.is_healthy,
            "is_available": self.is_available_for_workload,
            "load_factor": self.get_load_factor(),
            "active_workloads": len(self.active_workloads),
            "metadata": self.metadata,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "EdgeLocation":
        """Create EdgeLocation from dictionary.

        Accepts the shape produced by ``to_dict``; missing optional keys
        fall back to the same defaults as the constructor. Restores
        ``status`` when present; metrics are NOT restored.
        """
        coordinates = GeographicCoordinates(
            latitude=data["coordinates"]["latitude"],
            longitude=data["coordinates"]["longitude"],
        )

        capabilities_data = data["capabilities"]
        capabilities = EdgeCapabilities(
            cpu_cores=capabilities_data["cpu_cores"],
            memory_gb=capabilities_data["memory_gb"],
            storage_gb=capabilities_data["storage_gb"],
            gpu_available=capabilities_data.get("gpu_available", False),
            gpu_type=capabilities_data.get("gpu_type"),
            bandwidth_gbps=capabilities_data.get("bandwidth_gbps", 1.0),
            database_support=capabilities_data.get(
                "database_support", ["postgresql", "redis"]
            ),
            ai_models_available=capabilities_data.get("ai_models_available", []),
        )

        compliance_zones = [
            ComplianceZone(zone) for zone in data.get("compliance_zones", ["public"])
        ]

        location = cls(
            location_id=data["location_id"],
            name=data["name"],
            region=EdgeRegion(data["region"]),
            coordinates=coordinates,
            capabilities=capabilities,
            compliance_zones=compliance_zones,
            provider=data.get("provider", "kailash"),
            endpoint_url=data.get("endpoint_url"),
            **data.get("metadata", {}),
        )

        # Restore status if provided
        if "status" in data:
            location.status = EdgeStatus(data["status"])

        return location

    def __str__(self) -> str:
        return f"EdgeLocation({self.name}, {self.region.value}, {self.status.value})"

    def __repr__(self) -> str:
        return (
            f"EdgeLocation(location_id='{self.location_id}', name='{self.name}', "
            f"region={self.region}, status={self.status})"
        )
|
514
|
+
|
515
|
+
|
516
|
+
# Predefined edge locations for common deployments.
# Keyed by location_id; looked up via get_predefined_location() and
# enumerated via list_predefined_locations(). Note these are module-level
# singletons — mutating their runtime state is shared process-wide.
PREDEFINED_LOCATIONS = {
    "us-east-1": EdgeLocation(
        location_id="us-east-1",
        name="US East (Virginia)",
        region=EdgeRegion.US_EAST,
        coordinates=GeographicCoordinates(39.0458, -77.5081),
        capabilities=EdgeCapabilities(
            cpu_cores=16,
            memory_gb=64,
            storage_gb=1000,
            gpu_available=True,
            gpu_type="NVIDIA A100",
            bandwidth_gbps=10.0,
            database_support=["postgresql", "mongodb", "redis"],
            ai_models_available=["llama3.2", "gpt-4", "claude-3"],
        ),
        compliance_zones=[
            ComplianceZone.PUBLIC,
            ComplianceZone.HIPAA,
            ComplianceZone.SOX,
        ],
    ),
    "eu-west-1": EdgeLocation(
        location_id="eu-west-1",
        name="EU West (Ireland)",
        region=EdgeRegion.EU_WEST,
        coordinates=GeographicCoordinates(53.3498, -6.2603),
        capabilities=EdgeCapabilities(
            cpu_cores=12,
            memory_gb=48,
            storage_gb=800,
            gpu_available=True,
            gpu_type="NVIDIA V100",
            bandwidth_gbps=5.0,
            database_support=["postgresql", "redis"],
            ai_models_available=["llama3.2", "claude-3"],
        ),
        compliance_zones=[ComplianceZone.GDPR, ComplianceZone.PUBLIC],
    ),
    "asia-east-1": EdgeLocation(
        location_id="asia-east-1",
        name="Asia East (Tokyo)",
        region=EdgeRegion.JAPAN,
        coordinates=GeographicCoordinates(35.6762, 139.6503),
        capabilities=EdgeCapabilities(
            cpu_cores=8,
            memory_gb=32,
            storage_gb=500,
            gpu_available=False,
            bandwidth_gbps=3.0,
            database_support=["postgresql", "redis"],
            ai_models_available=["llama3.2"],
        ),
        compliance_zones=[ComplianceZone.PUBLIC],
    ),
}
|
573
|
+
|
574
|
+
|
575
|
+
def get_predefined_location(location_id: str) -> Optional[EdgeLocation]:
    """Look up a predefined edge location by ID; None when unknown."""
    try:
        return PREDEFINED_LOCATIONS[location_id]
    except KeyError:
        return None
|
578
|
+
|
579
|
+
|
580
|
+
def list_predefined_locations() -> List[EdgeLocation]:
    """Return all predefined edge locations as a fresh list."""
    return [*PREDEFINED_LOCATIONS.values()]
|
@@ -0,0 +1,33 @@
|
|
1
|
+
"""Enhanced Gateway Integration for Kailash SDK.
|
2
|
+
|
3
|
+
This module provides:
|
4
|
+
- Resource reference support in JSON API
|
5
|
+
- Secret management with encryption
|
6
|
+
- Enhanced client SDK for async workflows
|
7
|
+
- Production-ready gateway for complex deployments
|
8
|
+
"""
|
9
|
+
|
10
|
+
from .api import WorkflowRequestModel, WorkflowResponseModel, create_gateway_app
|
11
|
+
from .enhanced_gateway import (
|
12
|
+
EnhancedDurableAPIGateway,
|
13
|
+
WorkflowNotFoundError,
|
14
|
+
WorkflowRequest,
|
15
|
+
WorkflowResponse,
|
16
|
+
)
|
17
|
+
from .resource_resolver import ResourceReference, ResourceResolver
|
18
|
+
from .security import SecretBackend, SecretManager, SecretNotFoundError
|
19
|
+
|
20
|
+
# Public API of the gateway package; every name here is re-exported from
# the submodule imports at the top of this file.
__all__ = [
    "EnhancedDurableAPIGateway",
    "WorkflowRequest",
    "WorkflowResponse",
    "WorkflowNotFoundError",
    "ResourceReference",
    "ResourceResolver",
    "SecretManager",
    "SecretBackend",
    "SecretNotFoundError",
    "create_gateway_app",
    "WorkflowRequestModel",
    "WorkflowResponseModel",
]
|