xenfra-sdk 0.1.9__tar.gz → 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/PKG-INFO +1 -1
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/pyproject.toml +1 -1
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/dockerizer.py +29 -12
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/engine.py +47 -11
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/models.py +1 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/recipes.py +5 -4
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/resources/deployments.py +6 -2
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/templates/cloud-init.sh.j2 +31 -9
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/README.md +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/__init__.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/cli/__init__.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/cli/main.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/client.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/client_with_hooks.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/config.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/db/__init__.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/db/models.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/db/session.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/dependencies.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/exceptions.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/mcp_client.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/patterns.json +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/privacy.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/resources/__init__.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/resources/base.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/resources/intelligence.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/resources/projects.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/security.py +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/templates/Dockerfile.j2 +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/templates/docker-compose.yml.j2 +0 -0
- {xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/utils.py +0 -0

{xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/dockerizer.py

@@ -13,11 +13,12 @@ def detect_framework(path="."):
 
     # Check for Django first (common pattern: manage.py in root)
     if (project_root / "manage.py").is_file():
+        # Assume the project name is the current directory name
         project_name = project_root.name
         return "django", 8000, f"gunicorn {project_name}.wsgi:application --bind 0.0.0.0:8000"
 
     candidate_files = []
-
+
     # Check directly in project root
     for name in ["main.py", "app.py"]:
         if (project_root / name).is_file():
@@ -33,17 +34,23 @@ def detect_framework(path="."):
     for file_path in candidate_files:
         with open(file_path, "r") as f:
             content = f.read()
-
-
-
-            module_name
+
+        try:
+            module_name = str(file_path.relative_to(project_root)).replace(os.sep, '.')[:-3]
+            # If path is like src/testdeploy/main.py, module_name becomes src.testdeploy.main
+            if module_name.startswith("src."):
+                # Strip the "src." prefix for gunicorn/uvicorn
+                module_name = module_name[4:]
+        except ValueError:
+            module_name = file_path.stem
 
         if "FastAPI" in content:
+            # Use standard :app convention
             return "fastapi", 8000, f"uvicorn {module_name}:app --host 0.0.0.0 --port 8000"
-
+
         if "Flask" in content:
             return "flask", 5000, f"gunicorn {module_name}:app -b 0.0.0.0:5000"
-
+
     return None, None, None
 
 
@@ -59,14 +66,24 @@ def generate_templated_assets(context: dict):
     template_dir = Path(__file__).parent / "templates"
     env = Environment(loader=FileSystemLoader(template_dir))
 
-    # Detect framework specifics
-
+    # Detect framework specifics (use context if available, otherwise fallback to manual)
+    detected_framework, detected_port, detected_command = detect_framework()
+
+    framework = context.get("framework") or detected_framework
+    port = context.get("port") or detected_port
+    command = context.get("command") or detected_command
+
     if not framework:
-        print("Warning: No recognizable web framework detected.")
+        print("Warning: No recognizable web framework detected and no framework provided in context.")
         return []
 
-    # Merge detected
-    render_context = {
+    # Merge detected values with provided context (context takes precedence)
+    render_context = {
+        "framework": framework,
+        "port": port,
+        "command": command,
+        **context
+    }
 
     generated_files = []
 
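The new module-name derivation above can be traced by hand. The following standalone sketch reproduces the path-to-module logic from the hunk; the `src/testdeploy` layout is a hypothetical example, not SDK code.

```python
# Sketch of the 0.2.0 module-name derivation; layout is hypothetical.
import os
from pathlib import Path

project_root = Path("/work/myproject")
file_path = project_root / "src" / "testdeploy" / "main.py"

# Relative path -> dotted module path, dropping the ".py" suffix.
module_name = str(file_path.relative_to(project_root)).replace(os.sep, ".")[:-3]
assert module_name == "src.testdeploy.main"

# Strip the "src." prefix so gunicorn/uvicorn can import the module.
if module_name.startswith("src."):
    module_name = module_name[4:]

print(f"uvicorn {module_name}:app --host 0.0.0.0 --port 8000")
# -> uvicorn testdeploy.main:app --host 0.0.0.0 --port 8000
```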

{xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/engine.py

@@ -242,6 +242,7 @@ class InfraEngine:
         email: Optional[str] = None,
         domain: Optional[str] = None,
         repo_url: Optional[str] = None,
+        is_dockerized: bool = True,
         db_session: Session = None,
         **kwargs,
     ):
@@ -267,7 +268,7 @@ class InfraEngine:
 
         # === 3. CLOUD-INIT STAGE ===
         logger("\n[bold blue]PHASE 3: CREATING SERVER SETUP SCRIPT[/bold blue]")
-        cloud_init_script = recipes.generate_stack(context)
+        cloud_init_script = recipes.generate_stack(context, is_dockerized=is_dockerized)
         logger(" - Generated cloud-init script.")
         logger(
             f"--- Cloud-init script content ---\n{cloud_init_script}\n---------------------------------"
@@ -324,10 +325,22 @@ class InfraEngine:
             raise DeploymentError("Could not establish SSH connection.", stage="Polling")
 
         with conn:
+            last_log_line = 0
             for i in range(120):  # 20-minute timeout for cloud-init (installing Docker, Caddy, etc.)
-
+                # Check for completion
+                if conn.run("test -f /root/setup_complete", warn=True, hide=True).ok:
                     logger(" - Cloud-init setup complete.")
                     break
+
+                # Tail the setup log for visibility
+                log_result = conn.run(f"tail -n +{last_log_line + 1} /root/setup.log 2>/dev/null", warn=True, hide=True)
+                if log_result.ok and log_result.stdout.strip():
+                    new_lines = log_result.stdout.strip().split("\n")
+                    for line in new_lines:
+                        if line.strip():
+                            logger(f" [Server Setup] {line.strip()}")
+                    last_log_line += len(new_lines)
+
                 time.sleep(10)
             else:
                 raise DeploymentError(
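The polling loop above keeps a cursor into /root/setup.log and relies on `tail -n +K`, which prints a file starting at line K, so each pass emits only unseen output. A minimal local sketch of that cursor pattern, with plain file I/O standing in for the Fabric `conn.run` calls:

```python
# Sketch of the incremental-tail pattern; local reads stand in for
# conn.run(f"tail -n +{last_log_line + 1} /root/setup.log ...").
def read_new_lines(log_path: str, last_log_line: int) -> tuple[list[str], int]:
    try:
        with open(log_path) as f:
            lines = f.read().splitlines()
    except FileNotFoundError:  # log not created yet, like tail's 2>/dev/null
        return [], last_log_line
    new = lines[last_log_line:]
    return new, last_log_line + len(new)

cursor = 0
# Inside a poll loop:
#   new, cursor = read_new_lines("/root/setup.log", cursor)
#   for line in new: print(f" [Server Setup] {line}")
```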
@@ -342,18 +355,35 @@ class InfraEngine:
             logger(f" - Cloning repository from {repo_url}...")
             conn.run(f"git clone {repo_url} /root/app")
         else:
+            # Safety check: Prevent accidental deployment of Xenfra service code
+            if os.getenv("XENFRA_SERVICE_MODE") == "true":
+                raise DeploymentError(
+                    "Local folder deployment is not yet supported via the cloud API. "
+                    "Please provide a git_repo URL in your xenfra.yaml or CLI command.",
+                    stage="Code Upload",
+                )
+
             fabric.transfer.Transfer(conn).upload(
                 ".", "/root/app", exclude=[".git", ".venv", "__pycache__"]
             )
             logger(" - Code upload complete.")
 
         # === 7. FINAL DEPLOY STAGE ===
-
-
-
-
-
-
+        if is_dockerized:
+            logger("\n[bold blue]PHASE 7: BUILDING AND DEPLOYING CONTAINERS[/bold blue]")
+            with self._get_connection(ip_address) as conn:
+                result = conn.run("cd /root/app && docker-compose up -d --build", hide=True)
+                if result.failed:
+                    raise DeploymentError(f"docker-compose failed: {result.stderr}", stage="Deploy")
+                logger(" - Docker containers are building in the background...")
+        else:
+            logger("\n[bold blue]PHASE 7: STARTING HOST-BASED APPLICATION[/bold blue]")
+            start_command = context.get("command", f"uvicorn main:app --port {context.get('port', 8000)}")
+            with self._get_connection(ip_address) as conn:
+                result = conn.run(f"cd /root/app && python3 -m venv .venv && .venv/bin/pip install -r requirements.txt && nohup .venv/bin/{start_command} > app.log 2>&1 &", hide=True)
+                if result.failed:
+                    raise DeploymentError(f"Host-based start failed: {result.stderr}", stage="Deploy")
+                logger(f" - Application started via: {start_command}")
 
         # === 8. VERIFICATION STAGE ===
         logger("\n[bold blue]PHASE 8: VERIFYING DEPLOYMENT[/bold blue]")
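On the non-Docker path, Phase 7 composes a single remote shell line: create a venv, install requirements, then `nohup` the start command from the venv's bin directory. A sketch of the string being assembled (context values are hypothetical):

```python
# Sketch of the remote command built on the host-based Phase 7 path.
context = {"port": 8000}  # no explicit "command" key, so the default applies
start_command = context.get("command", f"uvicorn main:app --port {context.get('port', 8000)}")
remote_cmd = (
    "cd /root/app && python3 -m venv .venv && "
    ".venv/bin/pip install -r requirements.txt && "
    f"nohup .venv/bin/{start_command} > app.log 2>&1 &"
)
print(remote_cmd)
```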
@@ -361,9 +391,15 @@ class InfraEngine:
         for i in range(24):  # 2-minute timeout for health checks
             logger(f" - Health check attempt {i + 1}/24...")
             with self._get_connection(ip_address) as conn:
-                # Check if
-
-
+                # Check if running
+                if is_dockerized:
+                    ps_result = conn.run("cd /root/app && docker-compose ps", hide=True)
+                    running = "running" in ps_result.stdout
+                else:
+                    ps_result = conn.run("ps aux | grep -v grep | grep python", hide=True)
+                    running = ps_result.ok and len(ps_result.stdout.strip()) > 0
+
+                if not running:
                     time.sleep(5)
                     continue
 
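Phase 8 now branches the liveness predicate: the Docker path looks for the string "running" in `docker-compose ps` output, while the host path treats any surviving python process as alive. Isolated as a pure function for illustration (not SDK code):

```python
# Pure-function sketch of the two Phase 8 liveness checks.
def is_running(is_dockerized: bool, stdout: str, ok: bool = True) -> bool:
    if is_dockerized:
        # docker-compose ps prints "running" for healthy containers
        return "running" in stdout
    # host path: `ps aux | grep python` returned something
    return ok and len(stdout.strip()) > 0

print(is_running(True, "web   running   0.0.0.0:8000->8000/tcp"))  # True
print(is_running(False, ""))                                        # False
```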

{xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/models.py

@@ -178,5 +178,6 @@ class CodebaseAnalysisResponse(BaseModel):
         ..., description="Recommended instance size (basic, standard, premium)"
     )
     estimated_cost_monthly: float = Field(..., description="Estimated monthly cost in USD")
+    is_dockerized: bool = Field(True, description="Whether to use Docker containerization")
     confidence: float = Field(..., description="Confidence score (0.0-1.0)")
     notes: str | None = Field(None, description="Additional observations")
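Because the new field defaults to True, existing API responses that omit `is_dockerized` still validate. A minimal pydantic sketch of that behavior (the model name and single field are illustrative, not the full CodebaseAnalysisResponse):

```python
# Minimal sketch: a defaulted pydantic field keeps old payloads valid.
from pydantic import BaseModel, Field

class AnalysisSketch(BaseModel):  # stand-in for CodebaseAnalysisResponse
    is_dockerized: bool = Field(True, description="Whether to use Docker containerization")

print(AnalysisSketch().is_dockerized)                     # True  (field omitted)
print(AnalysisSketch(is_dockerized=False).is_dockerized)  # False (explicit opt-out)
```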

{xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/recipes.py

@@ -3,13 +3,14 @@ from pathlib import Path
 from jinja2 import Environment, FileSystemLoader
 
 
-def generate_stack(context: dict):
+def generate_stack(context: dict, is_dockerized: bool = True):
     """
     Generates a cloud-init startup script from a Jinja2 template.
 
     Args:
         context: A dictionary containing information for rendering the template,
                  e.g., {'domain': 'example.com', 'email': 'user@example.com'}
+        is_dockerized: Whether to setup Docker and Docker Compose (default: True)
     """
     # Path to the templates directory
     template_dir = Path(__file__).parent / "templates"
@@ -17,9 +18,9 @@ def generate_stack(context: dict):
 
     template = env.get_template("cloud-init.sh.j2")
 
-    # The non-dockerized logic has been removed as we are focusing on
-    # a purely Docker-based deployment strategy for simplicity and scalability.
     # The context will contain all necessary variables for the template.
-
+    # Pass is_dockerized to the template for conditional setup
+    render_context = {**context, "is_dockerized": is_dockerized}
+    script = template.render(render_context)
 
     return script
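Putting the two recipes.py hunks together: callers can now flip the template into its host-based mode. A call sketch following the new signature; the context values are the examples from the docstring:

```python
# Call sketch for the new generate_stack signature.
from xenfra_sdk import recipes

script = recipes.generate_stack(
    {"domain": "example.com", "email": "user@example.com"},
    is_dockerized=False,  # render the {% else %} branches of cloud-init.sh.j2
)
print(script.splitlines()[0])  # first line of the rendered cloud-init script
```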

{xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/resources/deployments.py

@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)
 
 
 class DeploymentsManager(BaseManager):
-    def create(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None) -> dict:
+    def create(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True) -> dict:
         """Creates a new deployment."""
         try:
             payload = {
@@ -25,6 +25,8 @@ class DeploymentsManager(BaseManager):
                 payload["region"] = region
             if size_slug:
                 payload["size_slug"] = size_slug
+            if is_dockerized is not None:
+                payload["is_dockerized"] = is_dockerized
 
             response = self._client._request("POST", "/deployments", json=payload)
             # Safe JSON parsing
@@ -95,7 +97,7 @@ class DeploymentsManager(BaseManager):
         except Exception as e:
             raise XenfraError(f"Failed to get logs for deployment {deployment_id}: {e}")
 
-    def create_stream(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None) -> Iterator[dict]:
+    def create_stream(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True) -> Iterator[dict]:
         """
         Creates a new deployment with real-time SSE log streaming.
 
@@ -129,6 +131,8 @@ class DeploymentsManager(BaseManager):
             payload["region"] = region
         if size_slug:
             payload["size_slug"] = size_slug
+        if is_dockerized is not None:
+            payload["is_dockerized"] = is_dockerized
 
         try:
             # Use httpx to stream the SSE response
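Client-side, the flag simply rides along in the POST /deployments payload. A hedged usage sketch: the client class name and the `deployments` attribute are assumptions based on the package's file layout, and only the `create(...)` parameters come from this diff.

```python
# Hedged usage sketch; XenfraClient and .deployments are assumed names,
# the create(...) parameters follow the new signature above.
from xenfra_sdk.client import XenfraClient  # assumed entry point

client = XenfraClient(api_key="YOUR_API_KEY")  # hypothetical constructor
deployment = client.deployments.create(
    project_name="testdeploy",
    git_repo="https://github.com/you/testdeploy.git",
    branch="main",
    framework="fastapi",
    is_dockerized=False,  # forwarded as payload["is_dockerized"]
)
```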

{xenfra_sdk-0.1.9 → xenfra_sdk-0.2.0}/src/xenfra_sdk/templates/cloud-init.sh.j2

@@ -11,17 +11,30 @@ echo "--------------------------------" >> $LOG
 mkdir -p /root/app
 cd /root/app
 
-# ---
-echo "⚔️ [0/6]
+# --- MERCILESS FIX: TERMINATE BACKGROUND PROCESSES ---
+echo "⚔️ [0/6] Mercilessly Terminating Background Processes..." >> $LOG
+
+kill_apt_processes() {
+    echo "🎯 Killing processes holding apt/dpkg locks..." >> $LOG
+    fuser -k /var/lib/dpkg/lock >/dev/null 2>&1
+    fuser -k /var/lib/apt/lists/lock >/dev/null 2>&1
+    fuser -k /var/lib/dpkg/lock-frontends >/dev/null 2>&1
+}
+
+# Explicitly stop and disable services that cause locks
 systemctl stop unattended-upgrades.service || true
+systemctl disable unattended-upgrades.service || true
 systemctl stop apt-daily.service || true
+systemctl disable apt-daily.service || true
 systemctl stop apt-daily-upgrade.service || true
-systemctl
-
+systemctl disable apt-daily-upgrade.service || true
+
+# Forcefully kill any remaining lock holders
+kill_apt_processes
 
-# Force remove locks if they exist
+# Force remove locks if they still exist (The Nuclear Option)
 rm -f /var/lib/dpkg/lock*
-rm -f /var/lib/apt/lists/lock
+rm -f /var/lib/apt/lists/lock*
 rm -f /var/cache/apt/archives/lock
 dpkg --configure -a || true
 # -----------------------------------------------
@@ -31,19 +44,28 @@ echo "🔄 [1/5] Refreshing Package Lists..." >> $LOG
 apt-get update
 apt-get install -y python3-pip git curl
 
-# 2.
+# 2. Setup Environment
+{% if is_dockerized %}
 echo "🐳 [2/5] Installing Docker..." >> $LOG
 apt-get install -y docker.io || (curl -fsSL https://get.docker.com | sh)
 echo "🎶 [3/5] Installing Docker Compose..." >> $LOG
 apt-get install -y docker-compose-v2
+{% else %}
+echo "🐍 [2/5] Setting up host-based Python environment..." >> $LOG
+apt-get install -y python3-venv python3-dev build-essential
+{% endif %}
 
-#
-
+# 3. Setup Reverse Proxy
+{% if is_dockerized %}
+echo "📦 [3/5] Installing Caddy..." >> $LOG
 apt-get install -y debian-keyring debian-archive-keyring apt-transport-https
 curl -LsSf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
 curl -LsSf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-stable.list
 apt-get update
 apt-get install -y caddy
+{% else %}
+echo "🛡️ [3/5] Skipping Caddy for host deployment (setup manual reverse proxy if needed)." >> $LOG
+{% endif %}
 
 {% if domain %}
 # Dynamically generate Caddyfile content
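The template now keys its Docker, Python-venv, and Caddy sections on a single Jinja2 flag. A tiny rendering sketch showing how `{% if is_dockerized %}` selects a branch (a stand-in snippet, not the real template):

```python
# Stand-in snippet demonstrating the {% if is_dockerized %} switch.
from jinja2 import Environment

snippet = (
    "{% if is_dockerized %}apt-get install -y docker.io\n"
    "{% else %}apt-get install -y python3-venv python3-dev build-essential\n"
    "{% endif %}"
)
tmpl = Environment().from_string(snippet)
print(tmpl.render(is_dockerized=True), end="")   # Docker branch
print(tmpl.render(is_dockerized=False), end="")  # host-based branch
```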
The 23 files listed above with +0 -0 are unchanged between 0.1.9 and 0.2.0.