xenfra-sdk 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra_sdk/__init__.py +61 -21
- xenfra_sdk/cli/main.py +226 -226
- xenfra_sdk/client.py +90 -90
- xenfra_sdk/config.py +26 -26
- xenfra_sdk/db/models.py +24 -24
- xenfra_sdk/db/session.py +30 -30
- xenfra_sdk/dependencies.py +39 -39
- xenfra_sdk/detection.py +396 -0
- xenfra_sdk/dockerizer.py +195 -194
- xenfra_sdk/engine.py +741 -619
- xenfra_sdk/exceptions.py +19 -19
- xenfra_sdk/manifest.py +212 -0
- xenfra_sdk/mcp_client.py +154 -154
- xenfra_sdk/models.py +184 -184
- xenfra_sdk/orchestrator.py +666 -0
- xenfra_sdk/patterns.json +13 -13
- xenfra_sdk/privacy.py +153 -153
- xenfra_sdk/recipes.py +26 -26
- xenfra_sdk/resources/base.py +3 -3
- xenfra_sdk/resources/deployments.py +278 -248
- xenfra_sdk/resources/files.py +101 -101
- xenfra_sdk/resources/intelligence.py +102 -95
- xenfra_sdk/security.py +41 -41
- xenfra_sdk/security_scanner.py +431 -0
- xenfra_sdk/templates/Caddyfile.j2 +14 -0
- xenfra_sdk/templates/Dockerfile.j2 +41 -38
- xenfra_sdk/templates/cloud-init.sh.j2 +90 -90
- xenfra_sdk/templates/docker-compose-multi.yml.j2 +29 -0
- xenfra_sdk/templates/docker-compose.yml.j2 +30 -30
- xenfra_sdk-0.2.4.dist-info/METADATA +116 -0
- xenfra_sdk-0.2.4.dist-info/RECORD +38 -0
- xenfra_sdk-0.2.2.dist-info/METADATA +0 -118
- xenfra_sdk-0.2.2.dist-info/RECORD +0 -32
- {xenfra_sdk-0.2.2.dist-info → xenfra_sdk-0.2.4.dist-info}/WHEEL +0 -0
xenfra_sdk/orchestrator.py
ADDED

@@ -0,0 +1,666 @@
+"""
+Xenfra Service Orchestrator - Multi-service deployment orchestration.
+
+This module handles the deployment of multiple services on single or multiple droplets,
+coordinating the generation of docker-compose.yml, Caddyfile, and health checks.
+"""
+
+import time
+from pathlib import Path
+from typing import Callable, Dict, List, Optional
+
+import yaml
+
+from .manifest import ServiceDefinition, load_services_from_xenfra_yaml
+from . import dockerizer
+
+
+class ServiceOrchestrator:
+    """
+    Orchestrates multi-service deployments on DigitalOcean.
+
+    Supports two deployment modes:
+    - single-droplet: All services on one machine with Caddy routing
+    - multi-droplet: Each service on separate droplets with private networking
+    """
+
+    def __init__(self, engine, services: List[ServiceDefinition], project_name: str, mode: str = "single-droplet", file_manifest: List[dict] = None):
+        """
+        Initialize the orchestrator.
+
+        Args:
+            engine: InfraEngine instance
+            services: List of ServiceDefinition instances
+            project_name: Name of the project
+            mode: Deployment mode (single-droplet or multi-droplet)
+            file_manifest: List of files to be uploaded (delta upload)
+        """
+        self.engine = engine
+        self.services = services
+        self.project_name = project_name
+        self.mode = mode
+        self.file_manifest = file_manifest or []
+        self.token = engine.token
+        self.manager = engine.manager
+
+    def deploy(self, logger: Callable = print, **kwargs) -> Dict:
+        """
+        Deploy services using the configured mode.
+
+        Automatically selects between single-droplet and multi-droplet
+        based on self.mode.
+        """
+        if self.mode == "multi-droplet":
+            return self.deploy_multi_droplet(logger=logger, **kwargs)
+        else:
+            return self.deploy_single_droplet(logger=logger, **kwargs)
+
+    def deploy_single_droplet(
+        self,
+        logger: Callable = print,
+        **kwargs
+    ) -> Dict:
+        """
+        Deploy all services on a single droplet with Caddy routing.
+
+        This is the cost-effective option for development and small projects.
+        All services run on one machine, communicating via localhost.
+
+        Args:
+            logger: Logging function (Rich-compatible)
+            **kwargs: Additional parameters passed to InfraEngine
+
+        Returns:
+            Dict with deployment result:
+            {
+                "droplet": Droplet object,
+                "services": {"svc1": True, "svc2": False, ...},
+                "status": "SUCCESS" | "PARTIAL" | "FAILED",
+                "url": "http://ip-address"
+            }
+        """
+        logger(f"[bold blue]🚀 Deploying {len(self.services)} services (single-droplet mode)[/bold blue]")
+
+        for svc in self.services:
+            logger(f" - {svc.name} (port {svc.port})")
+
+        # Step 1: Calculate droplet size based on service count
+        size = self._calculate_droplet_size()
+        logger(f"\n[dim]Recommended droplet size: {size}[/dim]")
+
+        # Step 2: Generate multi-service docker-compose.yml
+        compose_content = self._generate_docker_compose()
+        logger(f"[dim]Generated docker-compose.yml ({len(compose_content)} bytes)[/dim]")
+
+        # Step 3: Generate Caddyfile for path-based routing
+        caddy_content = self._generate_caddyfile()
+        logger(f"[dim]Generated Caddyfile ({len(caddy_content)} bytes)[/dim]")
+
+        # Step 4: Deploy using InfraEngine (with modifications for multi-service)
+        # We'll use the first service's config as the base
+        primary_service = self.services[0]
+
+        result = {
+            "droplet": None,
+            "services": {},
+            "status": "FAILED",
+            "url": None,
+            "compose_content": compose_content,
+            "caddy_content": caddy_content,
+        }
+
+        try:
+            # Use InfraEngine's deploy_server but with our multi-service assets
+
+            # Helper to sanitize kwargs preventing "multiple values" TypeError
+            safe_kwargs = kwargs.copy()
+            explicit_args = [
+                "name", "size", "framework", "port", "is_dockerized",
+                "entrypoint", "multi_service_compose", "multi_service_caddy",
+                "services", "logger", "extra_assets"
+            ]
+            for arg in explicit_args:
+                safe_kwargs.pop(arg, None)
+
+            # Generate Dockerfiles for each service
+            extra_assets = {}
+            for svc in self.services:
+                dockerfile_name = f"Dockerfile.{svc.name}"
+
+                # Check if user already provided this file (or standard Dockerfile if in subfolder)
+                has_existing = False
+                if self.file_manifest:
+                    # Check for explicit Dockerfile.svcname
+                    if any(f.get("path") in [dockerfile_name, f"./{dockerfile_name}"] for f in self.file_manifest):
+                        has_existing = True
+                        logger(f" [dim]Using existing {dockerfile_name}[/dim]")
+
+                    # Check for standard Dockerfile if service has its own directory
+                    elif svc.path and svc.path != ".":
+                        std_path = f"{svc.path}/Dockerfile"
+                        if any(f.get("path") in [std_path, f"./{std_path}"] for f in self.file_manifest):
+                            # If standard Dockerfile exists in subfolder, we don't need to generate one
+                            # BUT we must ensure docker-compose points to it.
+                            # _generate_docker_compose forces "Dockerfile.svcname".
+                            # This is a potential conflict.
+                            # For now, we only skip if the EXACT filename matches what we expect.
+                            pass
+
+                if has_existing and not svc.missing_deps:
+                    continue
+
+                if has_existing and svc.missing_deps:
+                    # Injection logic for existing Dockerfile
+                    logger(f" - [Zen Mode] Injecting {len(svc.missing_deps)} missing deps into existing {dockerfile_name}")
+
+                    # Find the file in manifest and get its content
+                    existing_finfo = next(f for f in self.file_manifest if f.get("path") in [dockerfile_name, f"./{dockerfile_name}", f"{svc.path}/Dockerfile" if (svc.path and svc.path != ".") else "Dockerfile"])
+                    from .engine import DeploymentError
+                    if not self.engine.get_file_content:
+                        raise DeploymentError("Cannot inject deps into existing Dockerfile: get_file_content not available", stage="Asset Generation")
+
+                    content_bytes = self.engine.get_file_content(existing_finfo["sha"])
+                    content = content_bytes.decode("utf-8", errors="ignore")
+
+                    # Append injection block
+                    deps_str = " ".join(svc.missing_deps)
+                    injection_block = f"\n\n# --- Xenfra Zen Mode: Auto-heal missing dependencies ---\nRUN pip install --no-cache-dir {deps_str}\n"
+
+                    # If it's a multi-stage build or has entrypoint/cmd, try to insert before it
+                    # otherwise just append
+                    if "ENTRYPOINT" in content:
+                        parts = content.split("ENTRYPOINT", 1)
+                        content = parts[0] + injection_block + "ENTRYPOINT" + parts[1]
+                    elif "CMD" in content:
+                        parts = content.split("CMD", 1)
+                        content = parts[0] + injection_block + "CMD" + parts[1]
+                    else:
+                        content += injection_block
+
+                    extra_assets[dockerfile_name] = content
+                    continue
+
+                # Determine command (same logic as _generate_docker_compose)
+                command = svc.command
+                if not command and svc.entrypoint:
+                    if svc.framework == "fastapi":
+                        command = f"uvicorn {svc.entrypoint} --host 0.0.0.0 --port {svc.port}"
+                    elif svc.framework == "flask":
+                        command = f"gunicorn {svc.entrypoint} -b 0.0.0.0:{svc.port}"
+                    elif svc.framework == "django":
+                        command = f"gunicorn {svc.entrypoint} --bind 0.0.0.0:{svc.port}"
+
+                # Render assets using dockerizer
+                ctx = {
+                    "framework": svc.framework,
+                    "port": svc.port,
+                    "command": command,
+                    "missing_deps": svc.missing_deps,
+                    # Pass through other potential context
+                    "database": None,
+                }
+                assets = dockerizer.render_deployment_assets(ctx)
+                if "Dockerfile" in assets:
+                    extra_assets[dockerfile_name] = assets["Dockerfile"]
+
+            if extra_assets:
+                logger(f"[dim]Generated {len(extra_assets)} service Dockerfiles[/dim]")
+
+            deployment_result = self.engine.deploy_server(
+                name=self.project_name,
+                size=size,
+                framework=primary_service.framework,
+                port=80,  # Caddy listens on 80
+                is_dockerized=True,
+                entrypoint=None,  # We'll use docker-compose
+                # Pass multi-service config
+                multi_service_compose=compose_content,
+                multi_service_caddy=caddy_content,
+                extra_assets=extra_assets,  # Pass generated Dockerfiles
+                services=self.services,
+                logger=logger,
+                **safe_kwargs
+            )
+
+            result["droplet"] = deployment_result.get("droplet")
+            result["url"] = f"http://{deployment_result.get('ip_address')}"
+
+            # Step 5: Health check all services
+            if result["droplet"]:
+                result["services"] = self._health_check_all_services(
+                    result["droplet"].ip_address,
+                    logger=logger
+                )
+
+                # Determine overall status
+                healthy_count = sum(1 for v in result["services"].values() if v)
+                total_count = len(result["services"])
+
+                if healthy_count == total_count:
+                    result["status"] = "SUCCESS"
+                    logger(f"\n[bold green]✨ All {total_count} services healthy![/bold green]")
+                elif healthy_count > 0:
+                    result["status"] = "PARTIAL"
+                    logger(f"\n[yellow]⚠ {healthy_count}/{total_count} services healthy[/yellow]")
+                else:
+                    result["status"] = "FAILED"
+                    logger(f"\n[bold red]❌ All services failed health check[/bold red]")
+
+        except Exception as e:
+            logger(f"[bold red]Deployment failed: {e}[/bold red]")
+            result["error"] = str(e)
+
+        return result
+
+    def _calculate_droplet_size(self) -> str:
+        """
+        Recommend droplet size based on number of services.
+
+        Guidelines:
+        - 1-2 services: s-1vcpu-2gb ($12/month)
+        - 3-5 services: s-2vcpu-4gb ($24/month)
+        - 6+ services: s-4vcpu-8gb ($48/month)
+        """
+        service_count = len(self.services)
+
+        if service_count <= 2:
+            return "s-1vcpu-2gb"
+        elif service_count <= 5:
+            return "s-2vcpu-4gb"
+        else:
+            return "s-4vcpu-8gb"
+
+    def _generate_docker_compose(self) -> str:
+        """
+        Generate docker-compose.yml for all services.
+
+        Each service gets:
+        - Its own container
+        - Port mapping
+        - Environment variables
+        - Restart policy
+        """
+        services_config = {}
+
+        for svc in self.services:
+            # Build command based on framework and entrypoint
+            command = svc.command
+            if not command and svc.entrypoint:
+                if svc.framework == "fastapi":
+                    command = f"uvicorn {svc.entrypoint} --host 0.0.0.0 --port {svc.port}"
+                elif svc.framework == "flask":
+                    command = f"gunicorn {svc.entrypoint} -b 0.0.0.0:{svc.port}"
+                elif svc.framework == "django":
+                    command = f"gunicorn {svc.entrypoint} --bind 0.0.0.0:{svc.port}"
+
+            service_entry = {
+                "build": {
+                    "context": svc.path or ".",
+                    "dockerfile": f"Dockerfile.{svc.name}"
+                },
+                "ports": [f"{svc.port}:{svc.port}"],
+                "restart": "unless-stopped",
+            }
+
+            if command:
+                service_entry["command"] = command
+
+            if svc.env:
+                service_entry["environment"] = svc.env
+
+            services_config[svc.name] = service_entry
+
+        compose = {
+            "services": services_config
+        }
+
+        return yaml.dump(compose, default_flow_style=False, sort_keys=False)
+
+    def _generate_caddyfile(self) -> str:
+        """
+        Generate Caddyfile for path-based routing.
+
+        Routes:
+        - /<service-name>/* → localhost:<service-port> (Strip prefix)
+        - / → Gateway info page (Exact match only)
+        """
+        routes = []
+
+        for svc in self.services:
+            # handle_path strips the prefix automatically
+            route = f"""    handle_path /{svc.name}* {{
+        reverse_proxy localhost:{svc.port}
+    }}"""
+            routes.append(route)
+
+        caddyfile = f""":80 {{
+{chr(10).join(routes)}
+
+    handle / {{
+        respond "Xenfra Gateway - {self.project_name}" 200
+    }}
+
+    handle {{
+        respond "Not Found" 404
+    }}
+}}"""
+
+        return caddyfile
+
+    def _health_check_all_services(
+        self,
+        ip_address: str,
+        logger: Callable = print,
+        max_attempts: int = 3,
+        delay_seconds: int = 10
+    ) -> Dict[str, bool]:
+        """
+        Check health of all services via their routed paths.
+
+        Args:
+            ip_address: Droplet IP address
+            logger: Logging function
+            max_attempts: Number of retry attempts per service
+            delay_seconds: Delay between retries
+
+        Returns:
+            Dict mapping service name → healthy (bool)
+        """
+        import requests
+
+        results = {}
+
+        logger("\n[cyan]Running health checks...[/cyan]")
+
+        for svc in self.services:
+            endpoint = f"http://{ip_address}/{svc.name}/"
+            healthy = False
+
+            for attempt in range(max_attempts):
+                try:
+                    response = requests.get(endpoint, timeout=5)
+                    # Accept any HTTP response as healthy (200, 404, 500, etc.)
+                    if response.status_code >= 100:
+                        healthy = True
+                        logger(f" ✓ {svc.name} (port {svc.port})")
+                        break
+                except requests.RequestException:
+                    pass
+
+                if attempt < max_attempts - 1:
+                    time.sleep(delay_seconds)
+
+            if not healthy:
+                logger(f" ✗ {svc.name} (port {svc.port}) - failed")
+
+            results[svc.name] = healthy
+
+        return results
+
+    def deploy_multi_droplet(self, logger: Callable = print, **kwargs) -> Dict:
+        """
+        Deploy each service on its own droplet with private networking.
+
+        This is the scalable option for production workloads.
+        Each service runs on a dedicated droplet with private IP communication.
+
+        Architecture:
+        - Gateway droplet: Runs Caddy and routes traffic to service droplets
+        - Service droplets: Each runs one service, accessible via private IP
+
+        Returns:
+            Dict with deployment result:
+            {
+                "gateway": Droplet object,
+                "droplets": {"svc1": Droplet, "svc2": Droplet, ...},
+                "services": {"svc1": True, "svc2": False, ...},
+                "status": "SUCCESS" | "PARTIAL" | "FAILED",
+                "url": "http://gateway-ip"
+            }
+        """
+        logger(f"[bold blue]🚀 Deploying {len(self.services)} services (multi-droplet mode)[/bold blue]")
+
+        for svc in self.services:
+            logger(f" - {svc.name} (port {svc.port})")
+
+        logger(f"\n[dim]This will create {len(self.services) + 1} droplets (1 gateway + {len(self.services)} services)[/dim]")
+
+        result = {
+            "gateway": None,
+            "droplets": {},
+            "private_ips": {},
+            "services": {},
+            "status": "FAILED",
+            "url": None,
+        }
+
+        created_droplets = []
+        cleanup_on_failure = kwargs.get("cleanup_on_failure", False)
+
+        try:
+            # Step 1: Deploy each service on its own droplet
+            logger("\n[bold cyan]Phase 1: Deploying service droplets[/bold cyan]")
+
+            # Common sanitization for service droplets
+            service_kwargs = kwargs.copy()
+            for key in ["name", "size", "framework", "port", "is_dockerized", "entrypoint", "multi_service_compose", "logger", "extra_assets"]:
+                service_kwargs.pop(key, None)
+
+            for svc in self.services:
+                logger(f"\n[cyan]Deploying {svc.name}...[/cyan]")
+
+                # Generate single-service docker-compose
+                compose_content = self._generate_single_service_compose(svc)
+
+                try:
+                    droplet_result = self.engine.deploy_server(
+                        name=f"{self.project_name}-{svc.name}",
+                        size="s-1vcpu-1gb",  # Minimal size per service
+                        framework=svc.framework,
+                        port=svc.port,
+                        is_dockerized=True,
+                        entrypoint=svc.entrypoint,
+                        multi_service_compose=compose_content,
+                        logger=logger,
+                        **service_kwargs
+                    )
+
+                    droplet = droplet_result.get("droplet")
+                    if droplet:
+                        result["droplets"][svc.name] = droplet
+                        created_droplets.append(droplet)
+
+                        # Get private IP for internal routing
+                        private_ip = self._get_private_ip(droplet)
+                        result["private_ips"][svc.name] = private_ip or droplet.ip_address
+                        if not private_ip:
+                            logger(f"[yellow] Start Up Warning: No private IP found for {svc.name}. Routing will use public IP.[/yellow]")
+
+                        logger(f" [green]✓ {svc.name} deployed at {droplet.ip_address}[/green]")
+                    else:
+                        logger(f" [red]✗ {svc.name} deployment failed[/red]")
+
+                except Exception as e:
+                    logger(f" [red]✗ {svc.name} failed: {e}[/red]")
+                    # If a service fails, we continue best-effort deployment for now
+                    # But status will reflect failure.
+
+            # Step 2: Deploy gateway droplet with Caddy routing
+            logger("\n[bold cyan]Phase 2: Deploying gateway droplet[/bold cyan]")
+
+            if result["private_ips"]:
+                # Generate Caddyfile pointing to private IPs
+                caddy_content = self._generate_multi_droplet_caddyfile(result["private_ips"])
+
+                # Sanitize kwargs for Gateway deployment
+                gateway_kwargs = kwargs.copy()
+                for key in ["name", "size", "framework", "port", "is_dockerized", "multi_service_caddy", "install_caddy", "logger"]:
+                    gateway_kwargs.pop(key, None)
+
+                gateway_result = self.engine.deploy_server(
+                    name=f"{self.project_name}-gateway",
+                    size="s-1vcpu-1gb",  # Gateway is lightweight
+                    framework="other",
+                    port=80,
+                    is_dockerized=False,  # Gateway runs Caddy directly
+                    multi_service_caddy=caddy_content,
+                    install_caddy=True,  # Ensure Caddy is installed on host
+                    logger=logger,
+                    **gateway_kwargs
+                )
+
+                gateway = gateway_result.get("droplet")
+                if gateway:
+                    result["gateway"] = gateway
+                    created_droplets.append(gateway)
+                    result["url"] = f"http://{gateway.ip_address}"
+                    logger(f" [green]✓ Gateway deployed at {gateway.ip_address}[/green]")
+
+            # Step 3: Health check all services via gateway
+            logger("\n[bold cyan]Phase 3: Health checks[/bold cyan]")
+
+            if result["gateway"]:
+                result["services"] = self._health_check_all_services(
+                    result["gateway"].ip_address,
+                    logger=logger
+                )
+
+                # Determine overall status
+                healthy_count = sum(1 for v in result["services"].values() if v)
+                total_count = len(result["services"])
+
+                if healthy_count == total_count:
+                    result["status"] = "SUCCESS"
+                    logger(f"\n[bold green]✨ All {total_count} services healthy![/bold green]")
+                elif healthy_count > 0:
+                    result["status"] = "PARTIAL"
+                    logger(f"\n[yellow]⚠ {healthy_count}/{total_count} services healthy[/yellow]")
+                else:
+                    result["status"] = "FAILED"
+                    logger(f"\n[bold red]❌ All services failed health check[/bold red]")
+            else:
+                result["status"] = "FAILED"
+                logger("\n[bold red]❌ Gateway deployment failed[/bold red]")
+
+        except Exception as e:
+            logger(f"[bold red]Multi-droplet deployment failed: {e}[/bold red]")
+            result["error"] = str(e)
+
+            if cleanup_on_failure:
+                logger("[bold yellow]Cleaning up created resources...[/bold yellow]")
+                for d in created_droplets:
+                    try:
+                        logger(f" - Destroying droplet {d.name} ({d.id})...")
+                        d.destroy()
+                    except Exception as cleanup_err:
+                        logger(f" - Failed to destroy {d.id}: {cleanup_err}")
+            else:
+                logger("[yellow]Resource cleanup skipped (cleanup_on_failure=False). Orphaned droplets may exist.[/yellow]")
+
+        return result
+
+    def _generate_single_service_compose(self, svc: ServiceDefinition) -> str:
+        """Generate docker-compose.yml for a single service."""
+        command = svc.command
+        if not command and svc.entrypoint:
+            if svc.framework == "fastapi":
+                command = f"uvicorn {svc.entrypoint} --host 0.0.0.0 --port {svc.port}"
+            elif svc.framework == "flask":
+                command = f"gunicorn {svc.entrypoint} -b 0.0.0.0:{svc.port}"
+            elif svc.framework == "django":
+                command = f"gunicorn {svc.entrypoint} --bind 0.0.0.0:{svc.port}"
+
+        service_entry = {
+            "build": svc.path or ".",
+            "ports": [f"{svc.port}:{svc.port}"],
+            "restart": "unless-stopped",
+        }
+
+        if command:
+            service_entry["command"] = command
+
+        if svc.env:
+            service_entry["environment"] = svc.env
+
+        compose = {
+            "services": {
+                svc.name: service_entry
+            }
+        }
+
+        return yaml.dump(compose, default_flow_style=False, sort_keys=False)
+
+    def _generate_multi_droplet_caddyfile(self, private_ips: Dict[str, str]) -> str:
+        """
+        Generate Caddyfile for multi-droplet routing.
+
+        Routes traffic to service droplets via their private IPs.
+        Includes 404 fallback to prevent false positive health checks.
+        """
+        routes = []
+
+        for svc in self.services:
+            ip = private_ips.get(svc.name)
+            if ip:
+                route = f"""    handle_path /{svc.name}* {{
+        reverse_proxy {ip}:{svc.port}
+    }}"""
+                routes.append(route)
+
+        caddyfile = f""":80 {{
+{chr(10).join(routes)}
+
+    # Root page
+    handle / {{
+        respond "Xenfra Gateway - {self.project_name}" 200
+    }}
+
+    # Fallback for unmatched routes (prevents false positive health checks)
+    handle {{
+        respond "Not Found" 404
+    }}
+}}"""
+
+        return caddyfile
+
+    def _get_private_ip(self, droplet) -> Optional[str]:
+        """Get private IP of droplet if available."""
+        try:
+            for network in droplet.networks.get("v4", []):
+                if network.get("type") == "private":
+                    return network.get("ip_address")
+        except (AttributeError, KeyError):
+            pass
+        return None
+
+
+def get_orchestrator_for_project(engine, project_path: str = ".") -> Optional[ServiceOrchestrator]:
+    """
+    Factory function to create an orchestrator if xenfra.yaml has services.
+
+    Args:
+        engine: InfraEngine instance
+        project_path: Path to project directory
+
+    Returns:
+        ServiceOrchestrator if services found in xenfra.yaml, None otherwise
+    """
+    from .manifest import load_services_from_xenfra_yaml, get_deployment_mode
+
+    services = load_services_from_xenfra_yaml(project_path)
+
+    if services and len(services) > 1:
+        # Get project name from xenfra.yaml
+        yaml_path = Path(project_path) / "xenfra.yaml"
+        project_name = Path(project_path).name
+        mode = "single-droplet"
+
+        if yaml_path.exists():
+            with open(yaml_path) as f:
+                data = yaml.safe_load(f)
+                project_name = data.get("project_name", project_name)
+                mode = data.get("mode", "single-droplet")
+
+        return ServiceOrchestrator(engine, services, project_name, mode)
+
+    return None
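
For orientation, here is a minimal usage sketch of the new orchestrator module. It is not taken from the package: the InfraEngine import path and constructor call are assumptions (the diff above only shows that the orchestrator reads engine.token and engine.manager and calls engine.deploy_server), while get_orchestrator_for_project and deploy are defined in the diff itself.

    # Minimal usage sketch, assuming InfraEngine can be built from an API token.
    from xenfra_sdk.engine import InfraEngine  # import path assumed
    from xenfra_sdk.orchestrator import get_orchestrator_for_project

    engine = InfraEngine(token="dop_v1_...")  # hypothetical constructor signature

    # Returns a ServiceOrchestrator only when xenfra.yaml defines two or more
    # services; project_name and mode are read from xenfra.yaml when present.
    orchestrator = get_orchestrator_for_project(engine, project_path=".")

    if orchestrator is not None:
        # deploy() dispatches to single-droplet or multi-droplet mode;
        # cleanup_on_failure is honored by the multi-droplet path.
        result = orchestrator.deploy(logger=print, cleanup_on_failure=True)
        print(result["status"], result.get("url"))

In single-droplet mode this yields one droplet running Caddy on port 80, with each service reachable at http://<ip>/<service-name>/ via the generated handle_path routes.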
xenfra_sdk/patterns.json
CHANGED
@@ -1,14 +1,14 @@
-{
-    "redaction_patterns": [
-        "dop_v1_[a-f0-9]{64}",
-        "[sp]k_live_[a-zA-Z0-9]{24,}",
-        "[sp]k_test_[a-zA-Z0-9]{24,}",
-        "(https?://)[^\\s:]+:[^\\s@]+",
-        "Bearer\\s[a-zA-Z0-9\\._\\-]{20,}",
-        "(password|pwd|pass)=([^\\s&]+)",
-        "AKIA[0-9A-Z]{16}",
-        "[0-9a-zA-Z/+]{40}",
-        "\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b",
-        "[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}"
-    ]
+{
+    "redaction_patterns": [
+        "dop_v1_[a-f0-9]{64}",
+        "[sp]k_live_[a-zA-Z0-9]{24,}",
+        "[sp]k_test_[a-zA-Z0-9]{24,}",
+        "(https?://)[^\\s:]+:[^\\s@]+",
+        "Bearer\\s[a-zA-Z0-9\\._\\-]{20,}",
+        "(password|pwd|pass)=([^\\s&]+)",
+        "AKIA[0-9A-Z]{16}",
+        "[0-9a-zA-Z/+]{40}",
+        "\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b",
+        "[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}"
+    ]
 }
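
The redaction patterns above are plain Python-compatible regular expressions. A minimal sketch of how a caller might apply them follows; the redact helper is illustrative only and is not the API exposed by the SDK's privacy.py:

    import json
    import re

    # Compile the shipped pattern list once.
    with open("xenfra_sdk/patterns.json") as f:
        patterns = [re.compile(p) for p in json.load(f)["redaction_patterns"]]

    def redact(text: str) -> str:
        """Replace every match of every pattern with a placeholder."""
        for pattern in patterns:
            text = pattern.sub("[REDACTED]", text)
        return text

    # Bearer tokens, credentialed URLs, IPs, and emails all get masked.
    print(redact("Authorization: Bearer abcdef0123456789abcdef from 10.0.0.1"))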