xenfra-sdk 0.1.9__tar.gz → 0.2.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/PKG-INFO +1 -1
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/pyproject.toml +1 -1
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/dockerizer.py +29 -12
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/engine.py +82 -22
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/models.py +1 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/recipes.py +5 -4
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/resources/deployments.py +17 -6
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/templates/cloud-init.sh.j2 +31 -9
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/templates/docker-compose.yml.j2 +3 -9
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/README.md +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/__init__.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/cli/__init__.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/cli/main.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/client.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/client_with_hooks.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/config.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/db/__init__.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/db/models.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/db/session.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/dependencies.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/exceptions.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/mcp_client.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/patterns.json +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/privacy.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/resources/__init__.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/resources/base.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/resources/intelligence.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/resources/projects.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/security.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/templates/Dockerfile.j2 +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.1}/src/xenfra_sdk/utils.py +0 -0
src/xenfra_sdk/dockerizer.py

```diff
@@ -13,11 +13,12 @@ def detect_framework(path="."):
 
     # Check for Django first (common pattern: manage.py in root)
     if (project_root / "manage.py").is_file():
+        # Assume the project name is the current directory name
         project_name = project_root.name
         return "django", 8000, f"gunicorn {project_name}.wsgi:application --bind 0.0.0.0:8000"
 
     candidate_files = []
-
+
     # Check directly in project root
     for name in ["main.py", "app.py"]:
         if (project_root / name).is_file():
```
```diff
@@ -33,17 +34,23 @@ def detect_framework(path="."):
     for file_path in candidate_files:
         with open(file_path, "r") as f:
             content = f.read()
-
-
-
-            module_name
+
+            try:
+                module_name = str(file_path.relative_to(project_root)).replace(os.sep, '.')[:-3]
+                # If path is like src/testdeploy/main.py, module_name becomes src.testdeploy.main
+                if module_name.startswith("src."):
+                    # Strip the "src." prefix for gunicorn/uvicorn
+                    module_name = module_name[4:]
+            except ValueError:
+                module_name = file_path.stem
 
         if "FastAPI" in content:
+            # Use standard :app convention
            return "fastapi", 8000, f"uvicorn {module_name}:app --host 0.0.0.0 --port 8000"
-
+
         if "Flask" in content:
             return "flask", 5000, f"gunicorn {module_name}:app -b 0.0.0.0:5000"
-
+
     return None, None, None
 
 
```
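The new module-path derivation is the heart of this hunk: it turns a candidate file's location into the dotted module string that uvicorn/gunicorn expect, stripping a leading `src.` package prefix. A standalone sketch of the same transformation (hypothetical POSIX paths, mirroring the logic in the diff):

```python
import os
from pathlib import Path

def module_from_path(file_path: Path, project_root: Path) -> str:
    """Sketch: src/testdeploy/main.py -> testdeploy.main (assumes POSIX paths)."""
    try:
        # Relative path with separators replaced by dots, minus the ".py" suffix
        module = str(file_path.relative_to(project_root)).replace(os.sep, ".")[:-3]
        if module.startswith("src."):
            module = module[4:]  # servers import from inside src/, so drop the prefix
        return module
    except ValueError:
        # file_path is not under project_root; fall back to the bare stem
        return file_path.stem

assert module_from_path(Path("/app/src/testdeploy/main.py"), Path("/app")) == "testdeploy.main"
assert module_from_path(Path("/app/app.py"), Path("/app")) == "app"
```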
```diff
@@ -59,14 +66,24 @@ def generate_templated_assets(context: dict):
     template_dir = Path(__file__).parent / "templates"
     env = Environment(loader=FileSystemLoader(template_dir))
 
-    # Detect framework specifics
-
+    # Detect framework specifics (use context if available, otherwise fallback to manual)
+    detected_framework, detected_port, detected_command = detect_framework()
+
+    framework = context.get("framework") or detected_framework
+    port = context.get("port") or detected_port
+    command = context.get("command") or detected_command
+
     if not framework:
-        print("Warning: No recognizable web framework detected.")
+        print("Warning: No recognizable web framework detected and no framework provided in context.")
         return []
 
-    # Merge detected
-    render_context = {
+    # Merge detected values with provided context (context takes precedence)
+    render_context = {
+        "framework": framework,
+        "port": port,
+        "command": command,
+        **context
+    }
 
     generated_files = []
 
```
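The precedence rule is: explicit context keys win, detection fills whatever the caller omitted, and the final `**context` spread guarantees caller-supplied values survive the merge. A minimal sketch with hypothetical values:

```python
# Hypothetical: the caller pins the port but lets detection supply the rest.
detected = {"framework": "fastapi", "port": 8000,
            "command": "uvicorn main:app --host 0.0.0.0 --port 8000"}
context = {"port": 9000}

render_context = {
    "framework": context.get("framework") or detected["framework"],
    "port": context.get("port") or detected["port"],
    "command": context.get("command") or detected["command"],
    **context,  # caller-supplied keys overwrite the defaults above
}
assert render_context == {"framework": "fastapi", "port": 9000,
                          "command": detected["command"]}
```

Because `**context` is spread last, any key the caller sets explicitly is final, even if the `or`-fallbacks above resolved differently.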
src/xenfra_sdk/engine.py

```diff
@@ -162,7 +162,7 @@ class InfraEngine:
         # 3. Stream logs
         ip_address = droplet.ip_address
         with self._get_connection(ip_address) as conn:
-            conn.run("cd /root/app && docker-compose logs -f app", pty=True)
+            conn.run("cd /root/app && docker compose logs -f app", pty=True)
 
     def get_account_balance(self) -> dict:
         """
```
```diff
@@ -242,7 +242,11 @@ class InfraEngine:
         email: Optional[str] = None,
         domain: Optional[str] = None,
         repo_url: Optional[str] = None,
+        is_dockerized: bool = True,
         db_session: Session = None,
+        port: int = 8000,
+        command: str = None,
+        database: str = None,
         **kwargs,
     ):
         """A stateful, blocking orchestrator for deploying a new server."""
```
```diff
@@ -259,7 +263,10 @@ class InfraEngine:
             "email": email,
             "domain": domain,
             "repo_url": repo_url,
-
+            "port": port,
+            "command": command,
+            "database": database,
+            **kwargs,  # Pass any additional config
         }
         files = dockerizer.generate_templated_assets(context)
         for file in files:
```
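Taken together, the new signature lets a caller opt out of Docker and pin the port, start command, and database. A hypothetical invocation (the orchestrator's method and constructor names are not shown in this diff and are assumptions; the keyword names come from the hunks above):

```python
# Hypothetical call; only the keyword arguments added in 0.2.1 are certain from the diff.
engine = InfraEngine(token="do-api-token")  # constructor arguments are an assumption
engine.deploy(                              # method name is an assumption
    name="testdeploy",
    repo_url="https://github.com/example/testdeploy.git",
    is_dockerized=False,  # new: skip Docker and run directly on the host
    port=9000,            # new: forwarded into the template context
    command="uvicorn app:app --host 0.0.0.0 --port 9000",  # new
    database="postgres",  # new: pulls in the compose 'db' service when dockerized
)
```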
```diff
@@ -267,7 +274,7 @@ class InfraEngine:
 
         # === 3. CLOUD-INIT STAGE ===
         logger("\n[bold blue]PHASE 3: CREATING SERVER SETUP SCRIPT[/bold blue]")
-        cloud_init_script = recipes.generate_stack(context)
+        cloud_init_script = recipes.generate_stack(context, is_dockerized=is_dockerized)
         logger(" - Generated cloud-init script.")
         logger(
             f"--- Cloud-init script content ---\n{cloud_init_script}\n---------------------------------"
```
```diff
@@ -281,8 +288,8 @@ class InfraEngine:
             region=region,
             image=image,
             size_slug=size,
-            ssh_keys=[ssh_key],
-
+            ssh_keys=[ssh_key.id],
+            user_data=cloud_init_script,
             tags=["xenfra"],
         )
         droplet.create()
```
```diff
@@ -323,11 +330,37 @@ class InfraEngine:
         if not conn or not conn.is_connected:
             raise DeploymentError("Could not establish SSH connection.", stage="Polling")
 
+        logger(" - [DEBUG] Entering SSH context for Phase 5 polling...")
         with conn:
-
-
-
-
+            last_log_line = 0
+            logger(" - Polling server setup log (/root/setup.log)...")
+            for i in range(120):  # 20-minute timeout
+                # Heartbeat
+                if i % 3 == 0:  # Every 30 seconds
+                    logger(f" - Phase 5 Heartbeat: Waiting for setup completion ({i+1}/120)...")
+
+                # Check for completion with timeout
+                try:
+                    check_result = conn.run("test -f /root/setup_complete", warn=True, hide=True, timeout=10)
+                    if check_result.ok:
+                        logger(" - Cloud-init setup complete.")
+                        break
+                except Exception as e:
+                    logger(f" - [Warning] Status check failed: {e}. Retrying...")
+
+                # Tail the setup log for visibility
+                try:
+                    log_result = conn.run(f"tail -n +{last_log_line + 1} /root/setup.log 2>/dev/null", warn=True, hide=True, timeout=10)
+                    if log_result.ok and log_result.stdout.strip():
+                        new_lines = log_result.stdout.strip().split("\n")
+                        for line in new_lines:
+                            if line.strip():
+                                logger(f"   [Server Setup] {line.strip()}")
+                        last_log_line += len(new_lines)
+                except Exception as e:
+                    # Log doesn't exist yet or tail failed
+                    pass
+
                 time.sleep(10)
             else:
                 raise DeploymentError(
```
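The loop implements a sentinel-file handshake: the cloud-init script is expected to create `/root/setup_complete` when it finishes, and the engine polls for that file while tailing `/root/setup.log` for progress. A generic sketch of the same pattern (assuming `conn` is a `fabric.Connection`; the helper itself is illustrative, not part of the SDK):

```python
import time

def wait_for_sentinel(conn, sentinel="/root/setup_complete", log="/root/setup.log",
                      attempts=120, interval=10):
    """Poll for a remote marker file over SSH, surfacing new log lines while waiting."""
    seen = 0
    for _ in range(attempts):
        # warn=True keeps a non-zero exit from raising; .ok tells us if the file exists
        if conn.run(f"test -f {sentinel}", warn=True, hide=True, timeout=10).ok:
            return True
        tail = conn.run(f"tail -n +{seen + 1} {log} 2>/dev/null",
                        warn=True, hide=True, timeout=10)
        if tail.ok and tail.stdout.strip():
            lines = tail.stdout.strip().split("\n")
            for line in lines:
                print(f"[server] {line}")
            seen += len(lines)
        time.sleep(interval)
    return False
```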
```diff
@@ -342,18 +375,39 @@ class InfraEngine:
             logger(f" - Cloning repository from {repo_url}...")
             conn.run(f"git clone {repo_url} /root/app")
         else:
-
-
-
+            # Safety check: Prevent accidental deployment of Xenfra service code
+            if os.getenv("XENFRA_SERVICE_MODE") == "true":
+                raise DeploymentError(
+                    "Local folder deployment is not yet supported via the cloud API. "
+                    "Please provide a git_repo URL in your xenfra.yaml or CLI command.",
+                    stage="Code Upload",
+                )
+
+            # Use rsync for efficient local folder upload
+            private_key_path = str(Path.home() / ".ssh" / "id_rsa")
+            rsync_cmd = f'rsync -avz --exclude=".git" --exclude=".venv" --exclude="__pycache__" -e "ssh -i {private_key_path} -o StrictHostKeyChecking=no" . root@{ip_address}:/root/app/'
+            logger(f" - Uploading local code via rsync...")
+            result = subprocess.run(rsync_cmd, shell=True, capture_output=True, text=True)
+            if result.returncode != 0:
+                raise DeploymentError(f"rsync failed: {result.stderr}", stage="Code Upload")
         logger(" - Code upload complete.")
 
         # === 7. FINAL DEPLOY STAGE ===
-
-
-
-
-
-
+        if is_dockerized:
+            logger("\n[bold blue]PHASE 7: BUILDING AND DEPLOYING CONTAINERS[/bold blue]")
+            with self._get_connection(ip_address) as conn:
+                result = conn.run("cd /root/app && docker compose up -d --build", hide=True)
+                if result.failed:
+                    raise DeploymentError(f"docker-compose failed: {result.stderr}", stage="Deploy")
+                logger(" - Docker containers are building in the background...")
+        else:
+            logger("\n[bold blue]PHASE 7: STARTING HOST-BASED APPLICATION[/bold blue]")
+            start_command = context.get("command", f"uvicorn main:app --port {context.get('port', 8000)}")
+            with self._get_connection(ip_address) as conn:
+                result = conn.run(f"cd /root/app && python3 -m venv .venv && .venv/bin/pip install -r requirements.txt && nohup .venv/bin/{start_command} > app.log 2>&1 &", hide=True)
+                if result.failed:
+                    raise DeploymentError(f"Host-based start failed: {result.stderr}", stage="Deploy")
+                logger(f" - Application started via: {start_command}")
 
         # === 8. VERIFICATION STAGE ===
         logger("\n[bold blue]PHASE 8: VERIFYING DEPLOYMENT[/bold blue]")
```
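On the non-Docker path the start command is resolved against the project virtualenv, so the remote shell line expands roughly as below (hypothetical context values). Note the `.venv/bin/{start_command}` prefix assumes the command's first token is an executable installed into the venv, such as `uvicorn` or `gunicorn`:

```python
# Illustrative expansion of the host-based start line from the diff.
context = {"command": "uvicorn app:app --host 0.0.0.0 --port 9000", "port": 9000}
start_command = context.get("command", f"uvicorn main:app --port {context.get('port', 8000)}")
remote = (
    "cd /root/app && python3 -m venv .venv && "
    ".venv/bin/pip install -r requirements.txt && "
    f"nohup .venv/bin/{start_command} > app.log 2>&1 &"
)
print(remote)
# -> cd /root/app && python3 -m venv .venv && .venv/bin/pip install -r requirements.txt
#    && nohup .venv/bin/uvicorn app:app --host 0.0.0.0 --port 9000 > app.log 2>&1 &
```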
```diff
@@ -361,9 +415,15 @@ class InfraEngine:
         for i in range(24):  # 2-minute timeout for health checks
             logger(f" - Health check attempt {i + 1}/24...")
             with self._get_connection(ip_address) as conn:
-                # Check if
-
-
+                # Check if running
+                if is_dockerized:
+                    ps_result = conn.run("cd /root/app && docker compose ps", hide=True)
+                    running = "running" in ps_result.stdout
+                else:
+                    ps_result = conn.run("ps aux | grep -v grep | grep python", hide=True)
+                    running = ps_result.ok and len(ps_result.stdout.strip()) > 0
+
+                if not running:
                     time.sleep(5)
                     continue
 
```
```diff
@@ -396,7 +456,7 @@ class InfraEngine:
         else:
             # On failure, get logs and destroy droplet
             with self._get_connection(ip_address) as conn:
-                logs = conn.run("cd /root/app && docker-compose logs", hide=True).stdout
+                logs = conn.run("cd /root/app && docker compose logs", hide=True).stdout
             raise DeploymentError(
                 f"Application failed to become healthy in time. Logs:\n{logs}",
                 stage="Verification",
```
src/xenfra_sdk/models.py

```diff
@@ -178,5 +178,6 @@ class CodebaseAnalysisResponse(BaseModel):
         ..., description="Recommended instance size (basic, standard, premium)"
     )
     estimated_cost_monthly: float = Field(..., description="Estimated monthly cost in USD")
+    is_dockerized: bool = Field(True, description="Whether to use Docker containerization")
     confidence: float = Field(..., description="Confidence score (0.0-1.0)")
     notes: str | None = Field(None, description="Additional observations")
```
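Since the new field defaults to `True`, analysis payloads produced by older servers that omit the key still validate and keep the Docker behavior. A minimal sketch of the slice of the model visible in this hunk (the full model has more fields than shown):

```python
from pydantic import BaseModel, Field

class CodebaseAnalysisResponse(BaseModel):
    # Sketch: only the fields visible in the diff hunk are reproduced here.
    estimated_cost_monthly: float = Field(..., description="Estimated monthly cost in USD")
    is_dockerized: bool = Field(True, description="Whether to use Docker containerization")
    confidence: float = Field(..., description="Confidence score (0.0-1.0)")
    notes: str | None = Field(None, description="Additional observations")

# An older payload without the key still validates and defaults to Docker:
r = CodebaseAnalysisResponse(estimated_cost_monthly=12.0, confidence=0.9)
assert r.is_dockerized is True
```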
src/xenfra_sdk/recipes.py

```diff
@@ -3,13 +3,14 @@ from pathlib import Path
 from jinja2 import Environment, FileSystemLoader
 
 
-def generate_stack(context: dict):
+def generate_stack(context: dict, is_dockerized: bool = True):
     """
     Generates a cloud-init startup script from a Jinja2 template.
 
     Args:
         context: A dictionary containing information for rendering the template,
                  e.g., {'domain': 'example.com', 'email': 'user@example.com'}
+        is_dockerized: Whether to setup Docker and Docker Compose (default: True)
     """
     # Path to the templates directory
     template_dir = Path(__file__).parent / "templates"
```
```diff
@@ -17,9 +18,9 @@ def generate_stack(context: dict):
 
     template = env.get_template("cloud-init.sh.j2")
 
-    # The non-dockerized logic has been removed as we are focusing on
-    # a purely Docker-based deployment strategy for simplicity and scalability.
     # The context will contain all necessary variables for the template.
-
+    # Pass is_dockerized to the template for conditional setup
+    render_context = {**context, "is_dockerized": is_dockerized}
+    script = template.render(render_context)
 
     return script
```
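Callers can now toggle the whole server recipe from one flag. A hypothetical usage (the import path is an assumption):

```python
from xenfra_sdk import recipes  # import path is an assumption

context = {"domain": "example.com", "email": "user@example.com"}

# Docker-based server (the default, matching 0.1.9 behavior):
docker_script = recipes.generate_stack(context)

# Host-based server: the template's {% if is_dockerized %} branches skip
# Docker/Compose/Caddy and install python3-venv/python3-dev instead.
host_script = recipes.generate_stack(context, is_dockerized=False)
```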
src/xenfra_sdk/resources/deployments.py

```diff
@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)
 
 
 class DeploymentsManager(BaseManager):
-    def create(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None) -> dict:
+    def create(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True) -> dict:
         """Creates a new deployment."""
         try:
             payload = {
```
```diff
@@ -25,6 +25,8 @@ class DeploymentsManager(BaseManager):
                 payload["region"] = region
             if size_slug:
                 payload["size_slug"] = size_slug
+            if is_dockerized is not None:
+                payload["is_dockerized"] = is_dockerized
 
             response = self._client._request("POST", "/deployments", json=payload)
             # Safe JSON parsing
```
```diff
@@ -95,7 +97,7 @@ class DeploymentsManager(BaseManager):
         except Exception as e:
             raise XenfraError(f"Failed to get logs for deployment {deployment_id}: {e}")
 
-    def create_stream(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None) -> Iterator[dict]:
+    def create_stream(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True, port: int = None, command: str = None, database: str = None) -> Iterator[dict]:
         """
         Creates a new deployment with real-time SSE log streaming.
 
```
```diff
@@ -108,6 +110,10 @@ class DeploymentsManager(BaseManager):
             framework: Framework type (fastapi, flask, django)
             region: DigitalOcean region (optional)
             size_slug: DigitalOcean droplet size (optional)
+            is_dockerized: Whether to use Docker (optional)
+            port: Application port (optional, default 8000)
+            command: Start command (optional, auto-detected if not provided)
+            database: Database type (optional, e.g. 'postgres')
 
         Yields:
             dict: SSE events with 'event' and 'data' fields
```
```diff
@@ -129,6 +135,14 @@ class DeploymentsManager(BaseManager):
             payload["region"] = region
         if size_slug:
             payload["size_slug"] = size_slug
+        if is_dockerized is not None:
+            payload["is_dockerized"] = is_dockerized
+        if port:
+            payload["port"] = port
+        if command:
+            payload["command"] = command
+        if database:
+            payload["database"] = database
 
         try:
             # Use httpx to stream the SSE response
```
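A hypothetical end-to-end use of the extended streaming call (the client class and the `deployments` accessor are assumptions; the keyword arguments and the event shape come from this diff):

```python
from xenfra_sdk import XenfraClient  # client class name is an assumption

client = XenfraClient(api_key="...")          # constructor is an assumption
events = client.deployments.create_stream(    # 'deployments' accessor is an assumption
    project_name="testdeploy",
    git_repo="https://github.com/example/testdeploy.git",
    branch="main",
    framework="fastapi",
    is_dockerized=True,   # new in 0.2.1
    port=8000,            # new in 0.2.1
    database="postgres",  # new in 0.2.1
)
for event in events:
    # Per the docstring, each event is a dict with 'event' and 'data' fields.
    print(event["event"], event["data"])
```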
```diff
@@ -146,11 +160,8 @@ class DeploymentsManager(BaseManager):
         streaming_api_url = os.getenv("XENFRA_STREAMING_API_URL")
         if streaming_api_url:
             base_url = streaming_api_url
-        elif self._client.api_url == "https://api.xenfra.tech":
-            # Production: use non-proxied streaming subdomain
-            base_url = "https://stream.xenfra.tech"
         else:
-            # Local/dev: use regular API URL
+            # Local/dev/production: use regular API URL
             base_url = self._client.api_url
 
         url = f"{base_url}/deployments/stream"
```
src/xenfra_sdk/templates/cloud-init.sh.j2

```diff
@@ -11,17 +11,30 @@ echo "--------------------------------" >> $LOG
 mkdir -p /root/app
 cd /root/app
 
-# ---
-echo "⚔️ [0/6]
+# --- MERCILESS FIX: TERMINATE BACKGROUND PROCESSES ---
+echo "⚔️ [0/6] Mercilessly Terminating Background Processes..." >> $LOG
+
+kill_apt_processes() {
+    echo "🎯 Killing processes holding apt/dpkg locks..." >> $LOG
+    fuser -k /var/lib/dpkg/lock >/dev/null 2>&1
+    fuser -k /var/lib/apt/lists/lock >/dev/null 2>&1
+    fuser -k /var/lib/dpkg/lock-frontends >/dev/null 2>&1
+}
+
+# Explicitly stop and disable services that cause locks
 systemctl stop unattended-upgrades.service || true
+systemctl disable unattended-upgrades.service || true
 systemctl stop apt-daily.service || true
+systemctl disable apt-daily.service || true
 systemctl stop apt-daily-upgrade.service || true
-systemctl
-
+systemctl disable apt-daily-upgrade.service || true
+
+# Forcefully kill any remaining lock holders
+kill_apt_processes
 
-# Force remove locks if they exist
+# Force remove locks if they still exist (The Nuclear Option)
 rm -f /var/lib/dpkg/lock*
-rm -f /var/lib/apt/lists/lock
+rm -f /var/lib/apt/lists/lock*
 rm -f /var/cache/apt/archives/lock
 dpkg --configure -a || true
 # -----------------------------------------------
```
```diff
@@ -31,19 +44,28 @@ echo "🔄 [1/5] Refreshing Package Lists..." >> $LOG
 apt-get update
 apt-get install -y python3-pip git curl
 
-# 2.
+# 2. Setup Environment
+{% if is_dockerized %}
 echo "🐳 [2/5] Installing Docker..." >> $LOG
 apt-get install -y docker.io || (curl -fsSL https://get.docker.com | sh)
 echo "🎶 [3/5] Installing Docker Compose..." >> $LOG
 apt-get install -y docker-compose-v2
+{% else %}
+echo "🐍 [2/5] Setting up host-based Python environment..." >> $LOG
+apt-get install -y python3-venv python3-dev build-essential
+{% endif %}
 
-#
-
+# 3. Setup Reverse Proxy
+{% if is_dockerized %}
+echo "📦 [3/5] Installing Caddy..." >> $LOG
 apt-get install -y debian-keyring debian-archive-keyring apt-transport-https
 curl -LsSf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
 curl -LsSf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-stable.list
 apt-get update
 apt-get install -y caddy
+{% else %}
+echo "🛡️ [3/5] Skipping Caddy for host deployment (setup manual reverse proxy if needed)." >> $LOG
+{% endif %}
 
 {% if domain %}
 # Dynamically generate Caddyfile content
```
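A quick way to see the new conditional in action is to render the template directly with both flag values; the marker strings below come from the echo lines in the hunk, and the loader path assumes the repository layout:

```python
from jinja2 import Environment, FileSystemLoader

# Illustrative check of the {% if is_dockerized %} branches.
env = Environment(loader=FileSystemLoader("src/xenfra_sdk/templates"))
template = env.get_template("cloud-init.sh.j2")

docker_script = template.render(is_dockerized=True)
host_script = template.render(is_dockerized=False)

assert "Installing Docker" in docker_script
assert "host-based Python environment" in host_script
assert "Installing Caddy" not in host_script
```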
src/xenfra_sdk/templates/docker-compose.yml.j2

```diff
@@ -6,17 +6,13 @@ services:
     build: .
     ports:
       - "{{ port | default(8000) }}:{{ port | default(8000) }}"
-
-
-    command: {{ command }}
-    {% if database == 'postgres' %}
+    command: {{ command | default('uvicorn main:app --host 0.0.0.0 --port 8000') }}
+    {% if database == 'postgres' or database == 'postgresql' %}
     depends_on:
       - db
     environment:
       - DATABASE_URL=postgresql://{{ db_user | default('user') }}:{{ db_password | default('password') }}@db:5432/{{ db_name | default('appdb') }}
-    {% endif %}
 
-    {% if database == 'postgres' %}
   db:
     image: postgres:15-alpine
     volumes:
```
```diff
@@ -25,9 +21,7 @@ services:
       - POSTGRES_USER={{ db_user | default('user') }}
       - POSTGRES_PASSWORD={{ db_password | default('password') }}
       - POSTGRES_DB={{ db_name | default('appdb') }}
-    {% endif %}
 
 volumes:
-{% if database == 'postgres' %}
   postgres_data:
-
+{% endif %}
```
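Rendering the updated template shows the effect of both changes: the `command` default and the broadened database match. An illustrative render (values hypothetical; `command` is omitted so Jinja's `default` filter supplies the uvicorn line, since `default` only replaces undefined variables, not `None`):

```python
from jinja2 import Environment, FileSystemLoader

# Loader path assumes the repository layout; context keys come from the template.
env = Environment(loader=FileSystemLoader("src/xenfra_sdk/templates"))
compose = env.get_template("docker-compose.yml.j2").render(
    port=8000,
    # 'command' omitted -> the new default('uvicorn main:app ...') kicks in
    database="postgresql",  # now matched alongside 'postgres'
)
print(compose)  # includes the 'db' postgres:15-alpine service and the postgres_data volume
```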