xenfra 0.4.4__py3-none-any.whl → 0.4.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra/blueprints/__init__.py +55 -0
- xenfra/blueprints/base.py +85 -0
- xenfra/blueprints/cache.py +286 -0
- xenfra/blueprints/dry_run.py +251 -0
- xenfra/blueprints/e2b.py +101 -0
- xenfra/blueprints/factory.py +113 -0
- xenfra/blueprints/railpack.py +319 -0
- xenfra/blueprints/validation.py +182 -0
- xenfra/commands/deployments.py +303 -78
- xenfra/commands/intelligence.py +5 -5
- xenfra/main.py +4 -1
- {xenfra-0.4.4.dist-info → xenfra-0.4.5.dist-info}/METADATA +1 -1
- xenfra-0.4.5.dist-info/RECORD +29 -0
- {xenfra-0.4.4.dist-info → xenfra-0.4.5.dist-info}/WHEEL +1 -1
- xenfra-0.4.4.dist-info/RECORD +0 -21
- {xenfra-0.4.4.dist-info → xenfra-0.4.5.dist-info}/entry_points.txt +0 -0
xenfra/blueprints/validation.py
ADDED
@@ -0,0 +1,182 @@
+"""
+Tier 1: Local validation - Fast syntax and import checking.
+
+This module provides lightning-fast (~100ms) validation of project files
+before any expensive operations. It checks:
+- Python syntax errors
+- Import errors (missing dependencies)
+- Config file validity
+- Basic project structure
+"""
+
+import ast
+import json
+import sys
+from pathlib import Path
+from typing import Optional
+from dataclasses import dataclass, field
+
+
+@dataclass
+class ValidationResult:
+    """Result of Tier 1 validation."""
+    success: bool
+    errors: list[str] = field(default_factory=list)
+    warnings: list[str] = field(default_factory=list)
+
+    def add_error(self, message: str):
+        self.errors.append(message)
+        self.success = False
+
+    def add_warning(self, message: str):
+        self.warnings.append(message)
+
+
+class LocalValidator:
+    """Fast local validation for common project issues."""
+
+    def validate(self, project_path: Path) -> ValidationResult:
+        """
+        Run all Tier 1 validations.
+
+        Args:
+            project_path: Path to the project directory
+
+        Returns:
+            ValidationResult with any errors or warnings
+        """
+        result = ValidationResult(success=True)
+
+        # Check Python files for syntax errors
+        self._validate_python_syntax(project_path, result)
+
+        # Validate config files
+        self._validate_configs(project_path, result)
+
+        # Check for required files
+        self._validate_required_files(project_path, result)
+
+        return result
+
+    def _validate_python_syntax(self, project_path: Path, result: ValidationResult):
+        """Check all Python files for syntax errors."""
+        python_files = list(project_path.rglob("*.py"))
+
+        for py_file in python_files:
+            # Skip common virtual environment directories
+            if any(part.startswith(".") or part in ["venv", ".venv", "env", ".env", "__pycache__"]
+                   for part in py_file.parts):
+                continue
+
+            try:
+                content = py_file.read_text(encoding="utf-8")
+                ast.parse(content)
+            except SyntaxError as e:
+                result.add_error(
+                    f"Syntax error in {py_file.relative_to(project_path)}:{e.lineno}: {e.msg}"
+                )
+            except UnicodeDecodeError:
+                result.add_warning(f"Could not read {py_file.relative_to(project_path)} (encoding issue)")
+
+    def _validate_configs(self, project_path: Path, result: ValidationResult):
+        """Validate configuration files."""
+        # requirements.txt - check format
+        req_file = project_path / "requirements.txt"
+        if req_file.exists():
+            try:
+                content = req_file.read_text(encoding="utf-8")
+                for line_num, line in enumerate(content.split("\n"), 1):
+                    line = line.strip()
+                    if not line or line.startswith("#"):
+                        continue
+                    # Basic format check
+                    if ";" in line and "sys_platform" not in line and "python_version" not in line:
+                        # Might be an error, but could also be valid
+                        pass
+            except Exception as e:
+                result.add_error(f"Error reading requirements.txt: {e}")
+
+        # package.json - validate JSON
+        pkg_file = project_path / "package.json"
+        if pkg_file.exists():
+            try:
+                content = pkg_file.read_text(encoding="utf-8")
+                json.loads(content)
+            except json.JSONDecodeError as e:
+                result.add_error(f"Invalid package.json: {e}")
+
+        # pyproject.toml - basic check
+        pyproject = project_path / "pyproject.toml"
+        if pyproject.exists():
+            try:
+                import tomllib
+                content = pyproject.read_bytes()
+                tomllib.loads(content.decode("utf-8"))
+            except ImportError:
+                # Python < 3.11, try toml library
+                try:
+                    import toml
+                    toml.load(pyproject)
+                except ImportError:
+                    pass  # Skip if no toml library
+                except Exception as e:
+                    result.add_error(f"Invalid pyproject.toml: {e}")
+            except Exception as e:
+                result.add_error(f"Invalid pyproject.toml: {e}")
+
+    def _validate_required_files(self, project_path: Path, result: ValidationResult):
+        """Check for required files based on detected project type."""
+        # Detect project type
+        has_requirements = (project_path / "requirements.txt").exists()
+        has_pyproject = (project_path / "pyproject.toml").exists()
+        has_package_json = (project_path / "package.json").exists()
+        has_go_mod = (project_path / "go.mod").exists()
+        has_cargo = (project_path / "Cargo.toml").exists()
+
+        # Check for main entry point (Python)
+        if has_requirements or has_pyproject:
+            main_files = [
+                project_path / "main.py",
+                project_path / "app.py",
+                project_path / "manage.py",  # Django
+            ]
+            app_dirs = [
+                project_path / "app",
+                project_path / project_path.name.lower().replace("-", "_").replace(" ", "_"),
+            ]
+
+            if not any(f.exists() for f in main_files):
+                # Check if there's a package directory with __main__.py or similar
+                has_entry = False
+                for app_dir in app_dirs:
+                    if app_dir.is_dir():
+                        if (app_dir / "__main__.py").exists() or (app_dir / "main.py").exists():
+                            has_entry = True
+                            break
+
+                if not has_entry:
+                    result.add_warning(
+                        "No clear Python entry point found (main.py, app.py, or package/__main__.py)"
+                    )
+
+        # Check for main entry point (Node.js)
+        if has_package_json:
+            pkg_json = json.loads((project_path / "package.json").read_text())
+            if "main" not in pkg_json and "start" not in str(pkg_json.get("scripts", {})):
+                result.add_warning(
+                    "No 'main' field or 'start' script found in package.json"
+                )
+
+
+def validate_project(project_path: Path) -> ValidationResult:
+    """
+    Convenience function for Tier 1 validation.
+
+    Args:
+        project_path: Path to the project directory
+
+    Returns:
+        ValidationResult with errors and warnings
+    """
+    validator = LocalValidator()
+    return validator.validate(project_path)
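For context, a minimal sketch of how this new Tier 1 helper could be driven on its own. The call mirrors the validate_project signature added above; invoking it standalone like this is just an illustration, not an entry point the CLI exposes directly.

    from pathlib import Path

    from xenfra.blueprints.validation import validate_project

    # Tier 1 only: no network access, no sandbox, roughly ~100ms on small projects
    result = validate_project(Path("."))
    for err in result.errors:
        print(f"error: {err}")
    for warn in result.warnings:
        print(f"warning: {warn}")
    print("ok" if result.success else "failed")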
xenfra/commands/deployments.py
CHANGED
@@ -3,6 +3,7 @@ Deployment commands for Xenfra CLI.
 """
 
 import os
+from pathlib import Path
 
 import click
 from rich.console import Console
@@ -15,6 +16,7 @@ from xenfra_sdk.privacy import scrub_logs
 from ..utils.auth import API_BASE_URL, get_auth_token
 from ..utils.codebase import has_xenfra_config
 from ..utils.config import apply_patch, read_xenfra_yaml
+from ..blueprints import run_dry_run
 from ..utils.validation import (
     validate_branch_name,
     validate_deployment_id,
@@ -419,23 +421,47 @@ def delete(project_id, show_details):
 @click.option("--size", help="DigitalOcean size slug override")
 @click.option("--no-heal", is_flag=True, help="Disable auto-healing on failure")
 @click.option("--cleanup-on-failure", is_flag=True, help="Automatically cleanup resources if deployment fails")
-
+@click.option("--force-sandbox", is_flag=True, help="Force full sandbox validation (Tier 3)")
+@click.option("--skip-sandbox", is_flag=True, help="Skip sandbox validation for faster iteration")
+def deploy(project_name, git_repo, branch, framework, region, size, no_heal, cleanup_on_failure, force_sandbox, skip_sandbox):
     """
-    Deploy current project to DigitalOcean with
+    Deploy current project to DigitalOcean with 3-tier dry-run validation.
 
+    Every deployment is validated before touching real infrastructure:
+
+    \b
+    [bold]Tier 1[/bold]: Local validation (~100ms) - Syntax, imports, config
+    [bold]Tier 2[/bold]: Railpack plan (~1-2s) - Build plan generation
+    [bold]Tier 3[/bold]: E2B Sandbox (~30-60s) - Full build validation (smart detection)
+
+    This "Dry Run First" architecture ensures deployments never fail due to
+    avoidable errors. Set E2B_API_KEY for full sandbox validation.
+
+    \b
     Deploys your application with zero configuration. The CLI will:
-    1.
+    1. Run 3-tier dry-run validation
     2. Create a deployment
-    3. Auto-diagnose and fix failures (unless --no-heal is set
-
-
+    3. Auto-diagnose and fix failures (unless --no-heal is set)
+
+    Environment Variables:
+    - E2B_API_KEY: API key for sandbox validation
+    - XENFRA_AI=off: Disable AI features
+    - XENFRA_SANDBOX=force: Always run Tier 3
+    - XENFRA_SANDBOX=skip: Skip Tier 3 validation
+    - XENFRA_SANDBOX=auto: Smart detection (default)
     """
-    # Check
-
+    # Check XENFRA_AI environment variable
+    ai_mode = os.environ.get("XENFRA_AI", "on").lower()
+    no_ai = ai_mode == "off"
     if no_ai:
-        console.print("[yellow]
+        console.print("[yellow]XENFRA_AI=off is set. Auto-healing disabled.[/yellow]")
         no_heal = True
 
+    # ==========================================
+    # 3-TIER DRY-RUN VALIDATION ("Dry Run First")
+    # ==========================================
+    project_path = Path(os.getcwd())
+
     # Check for xenfra.yaml
     if not has_xenfra_config():
         console.print("[yellow]No xenfra.yaml found.[/yellow]")
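The docstring above documents three XENFRA_SANDBOX values plus two CLI flags, but the resolution logic itself lives in the new xenfra/blueprints/dry_run.py, which this report does not show line by line. A hedged sketch of one plausible precedence, for orientation only (the helper name and the flag-over-env ordering are assumptions, not the package's confirmed behavior):

    import os

    def resolve_sandbox_mode(force_sandbox: bool, skip_sandbox: bool) -> str:
        # Hypothetical helper: explicit CLI flags are assumed to win over the env var.
        if force_sandbox:
            return "force"
        if skip_sandbox:
            return "skip"
        # "auto" = smart detection, per the documented default
        return os.environ.get("XENFRA_SANDBOX", "auto").lower()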
@@ -579,9 +605,134 @@ def deploy(project_name, git_repo, branch, framework, region, size, no_heal, cle
     # Retry loop for auto-healing
     attempt = 0
     deployment_id = None
-
+
+    # Context manager for temp dir (if cloning)
+    import tempfile
+    import shutil
+    import subprocess
+
+    temp_dir_obj = None
+    original_project_path = project_path
+
     try:
         with get_client() as client:
+            from ..utils.file_sync import scan_project_files_cached, ensure_gitignore_ignored
+
+            # --- 1. PREPARE SOURCE CODE ---
+            if git_repo:
+                console.print(f"[cyan]📥 Cloning {git_repo}...[/cyan]")
+                temp_dir_obj = tempfile.TemporaryDirectory()
+                temp_dir_path = Path(temp_dir_obj.name)
+
+                try:
+                    # Shallow clone for speed
+                    cmd = ["git", "clone", "--depth", "1"]
+                    if branch:
+                        cmd.extend(["-b", branch])
+                    cmd.extend([git_repo, str(temp_dir_path)])
+
+                    subprocess.run(cmd, check=True, capture_output=True)
+                    project_path = temp_dir_path
+                    console.print("[green]✓ Repository cloned[/green]")
+                except subprocess.CalledProcessError as e:
+                    console.print(f"[bold red]Failed to clone repository: {e}[/bold red]")
+                    raise click.Abort()
+            else:
+                # Local directory
+                # Protect privacy: ensure .xenfra is in .gitignore
+                if ensure_gitignore_ignored():
+                    console.print("[dim] - Added .xenfra to .gitignore for privacy[/dim]")
+
+            # --- 2. SCAN FILES ---
+            console.print("[cyan]📁 Scanning project files...[/cyan]")
+
+            # We need to temporarily change CWD for scan_project_files_cached to work if we cloned
+            # OR pass path to it. Assuming it scans CWD, we might need chdir
+            import os
+            cwd = os.getcwd()
+            try:
+                if project_path != Path(cwd):
+                    os.chdir(project_path)
+                file_manifest = scan_project_files_cached()
+            finally:
+                os.chdir(cwd)
+
+            console.print(f"[dim]Found {len(file_manifest)} files[/dim]")
+
+            if not file_manifest:
+                console.print("[bold red]Error: No files found.[/bold red]")
+                raise click.Abort()
+
+            # --- 3. UPLOAD FILES (Required for Tier 3 & Deployment) ---
+            # Check which files need uploading
+            console.print("[cyan]🔍 Checking file cache...[/cyan]")
+            check_result = client.files.check(file_manifest)
+            missing = check_result.get('missing', [])
+            cached = check_result.get('cached', 0)
+
+            if cached > 0:
+                console.print(f"[green]✓ {cached} files already cached on server[/green]")
+
+            # Upload missing files
+            if missing:
+                console.print(f"[cyan]☁️ Uploading {len(missing)} files...[/cyan]")
+                # We need absolute paths for upload reading
+                # If we cloned, file_manifest paths are relative to temp_dir
+                # But upload_files needs to know where to read from.
+                # Let's patch file_manifest with abs_paths for the uploader
+
+                upload_manifest = []
+                for f in file_manifest:
+                    f_copy = f.copy()
+                    f_copy["abs_path"] = str(project_path / f["path"])
+                    upload_manifest.append(f_copy)
+
+                uploaded = client.files.upload_files(
+                    upload_manifest,
+                    missing,
+                    progress_callback=lambda done, total: console.print(f"[dim] Progress: {done}/{total}[/dim]") if done % 10 == 0 or done == total else None
+                )
+                console.print(f"[green]✓ Uploaded {uploaded} files[/green]")
+            else:
+                console.print("[green]✓ All files synced[/green]")
+
+            # --- 4. RUN DRY RUN ---
+            console.print(Panel(
+                "[bold]🔍 Running 3-Tier Dry-Run Validation[/bold]\n"
+                "Tier 1: Local syntax check (~100ms)\n"
+                "Tier 2: Railpack build plan (~1-2s)\n"
+                "Tier 3: E2B sandbox via Xenfra API (smart detection)",
+                title="[bold cyan]Dry Run First Architecture[/bold cyan]",
+                border_style="cyan"
+            ))
+
+            with console.status("[cyan]Running validation...[/cyan]"):
+                dry_run_result = run_dry_run(
+                    project_path,
+                    force_sandbox=force_sandbox,  # or (force_sandbox or check_result['missing'])?
+                    skip_sandbox=skip_sandbox,
+                    api_client=client,
+                    file_manifest=file_manifest
+                )
+
+            # Display results
+            console.print(dry_run_result.get_summary())
+            console.print()
+
+            # Check if dry-run failed
+            if not dry_run_result.success:
+                console.print("[bold red]❌ Dry-run validation failed[/bold red]")
+                console.print("[yellow]Fix the errors above before deploying.[/yellow]")
+                console.print()
+                console.print("[dim]Tip: Run with --skip-sandbox to skip Tier 3[/dim]")
+                raise click.Abort()
+
+            console.print("[bold green]✅ Dry-run validation passed[/bold green]")
+            console.print()
+
+            # Prepare manifest for API (remove local-only fields if any, strictly path/sha/size)
+            api_file_manifest = [{"path": f["path"], "sha": f["sha"], "size": f["size"]} for f in file_manifest]
+
             while attempt < MAX_RETRY_ATTEMPTS:
                 attempt += 1
 
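The api_file_manifest built at the end of this hunk keeps exactly three fields per file, while abs_path stays a local-only field used by the uploader. A hedged sketch of what one entry might look like, assuming a SHA-256 digest behind the "sha" field (the real hashing happens inside scan_project_files_cached, which this report does not show, so the algorithm is an assumption):

    import hashlib
    from pathlib import Path

    def manifest_entry(project_path: Path, rel_path: str) -> dict:
        # Illustration only: the path/sha/size field names come from the diff above;
        # the hash algorithm and any cached metadata are guesses.
        data = (project_path / rel_path).read_bytes()
        return {
            "path": rel_path,                        # relative path, as sent to the API
            "sha": hashlib.sha256(data).hexdigest(),
            "size": len(data),
        }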
@@ -594,75 +745,60 @@ def deploy(project_name, git_repo, branch, framework, region, size, no_heal, cle
 
                 # Detect framework if not provided (AI-powered Zen Mode)
                 if not framework:
-
-
-
-
-                        if
-
-
-
-
-                            is_dockerized = analysis.is_dockerized
-                            # Override port if AI detected it and config didn't set one
-                            if not port_from_config and analysis.port:
-                                port = analysis.port
-                            # Override port and size if AI has strong recommendations
-                            if not size and analysis.instance_size:
-                                size = "s-1vcpu-1gb" if analysis.instance_size == "basic" else "s-2vcpu-4gb"
-
-                            mode_str = "Docker" if is_dockerized else "Bare Metal"
-                            console.print(f"[green]✓ Detected {framework.upper()} project ({mode_str} Mode)[/green] (Port: {port})")
+                    if no_ai:
+                        # ... static logic existing ...
+                        console.print("[yellow]⚠ AI disabled. Using static detection...[/yellow]")
+                        # Simple static checks based on Tier 2 result potentially?
+                        if dry_run_result.tier2:
+                            framework = dry_run_result.tier2.framework
+                            port = dry_run_result.tier2.port
+                            is_dockerized = True
+                            console.print(f"[green]✓ Detected {framework.upper()} (from Dry Run)[/green]")
                         else:
-
-
-                            is_dockerized = True
-                    except Exception as e:
-                        console.print(f"[yellow]⚠ AI detection failed: {e}. Defaulting to 'fastapi'[/yellow]")
-                        framework = "fastapi"
-                        is_dockerized = True
-
-                # Delta upload: if no git_repo, scan and upload local files
-                file_manifest = None
-                if not git_repo:
-                    from ..utils.file_sync import scan_project_files_cached, ensure_gitignore_ignored
-
-                    # Protect privacy: ensure .xenfra is in .gitignore
-                    if ensure_gitignore_ignored():
-                        console.print("[dim] - Added .xenfra to .gitignore for privacy[/dim]")
-
-                    console.print("[cyan]📁 Scanning project files...[/cyan]")
-
-                    file_manifest = scan_project_files_cached()
-                    console.print(f"[dim]Found {len(file_manifest)} files[/dim]")
-
-                    if not file_manifest:
-                        console.print("[bold red]Error: No files found to deploy.[/bold red]")
-                        raise click.Abort()
-
-                    # Check which files need uploading
-                    console.print("[cyan]🔍 Checking file cache...[/cyan]")
-                    check_result = client.files.check(file_manifest)
-                    missing = check_result.get('missing', [])
-                    cached = check_result.get('cached', 0)
-
-                    if cached > 0:
-                        console.print(f"[green]✓ {cached} files already cached[/green]")
-
-                    # Upload missing files
-                    if missing:
-                        console.print(f"[cyan]☁️ Uploading {len(missing)} files...[/cyan]")
-                        uploaded = client.files.upload_files(
-                            file_manifest,
-                            missing,
-                            progress_callback=lambda done, total: console.print(f"[dim] Progress: {done}/{total}[/dim]") if done % 10 == 0 or done == total else None
-                        )
-                        console.print(f"[green]✓ Uploaded {uploaded} files[/green]")
+                            framework = "fastapi"
+                            port = 8000
                     else:
-
-
-
-
+                        # AI-powered detection
+                        # We can use Tier 2 result for speed instead of full AI scan if available
+                        if dry_run_result.tier2 and dry_run_result.tier2.framework != "unknown":
+                            framework = dry_run_result.tier2.framework
+                            # config overrides...
+                            if not is_dockerized_from_config:
+                                is_dockerized = True  # Railpack is docker based
+                            if not port_from_config:
+                                port = dry_run_result.tier2.port
+                            console.print(f"[green]✓ Detected {framework.upper()} (from Railpack)[/green]")
+                        else:
+                            # Fallback to AI
+                            console.print("[cyan]🔍 AI Auto-detecting project type...[/cyan]")
+                            try:
+                                # We need to scan code snippet from PROJECT_PATH (temp or local)
+                                from ..utils.codebase import scan_codebase
+                                # scan_codebase needs CWD to be correct
+                                curr = os.getcwd()
+                                os.chdir(project_path)
+                                try:
+                                    code_snippets = scan_codebase()
+                                finally:
+                                    os.chdir(curr)
+
+                                if code_snippets:
+                                    analysis = client.intelligence.analyze_codebase(code_snippets)
+                                    framework = analysis.framework
+                                    if not is_dockerized_from_config: is_dockerized = analysis.is_dockerized
+                                    if not port_from_config and analysis.port: port = analysis.port
+                                    if not size and analysis.instance_size: size = "s-1vcpu-1gb" if analysis.instance_size == "basic" else "s-2vcpu-4gb"
+                                    mode_str = "Docker" if is_dockerized else "Bare Metal"
+                                    console.print(f"[green]✓ Detected {framework.upper()} project ({mode_str} Mode)[/green] (Port: {port})")
+                                else:
+                                    framework = "fastapi"
+                                    is_dockerized = True
+                            except Exception as e:
+                                framework = "fastapi"
+                                is_dockerized = True
+
+                # Delta upload was already done above!
+                # file_manifest is ready (api_file_manifest)
 
                 # Create deployment with real-time streaming
                 try:
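Both branches above read dry_run_result.tier2; together with the verbose output of the dry-run command later in this file, the attributes accessed are framework, language, package_manager, port and start_command. A hedged sketch of that Tier 2 result shape, inferred from usage only (the real class lives in the new blueprints modules, which this report does not show, and may differ):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Tier2Plan:
        # Field names inferred from attribute access in this diff; defaults are guesses.
        framework: str = "unknown"
        language: str = "unknown"
        package_manager: str = "unknown"
        port: int = 8000
        start_command: Optional[str] = None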
@@ -681,7 +817,7 @@ def deploy(project_name, git_repo, branch, framework, region, size, no_heal, cle
                     database=database,
                     package_manager=package_manager,
                     dependency_file=dependency_file,
-                    file_manifest=
+                    file_manifest=api_file_manifest,
                     cleanup_on_failure=cleanup_on_failure,
                     services=services,  # Microservices support
                     mode=mode,  # Deployment mode
@@ -1131,3 +1267,92 @@ def report(deployment_id, output_format):
         pass
     except Exception as e:
         console.print(f"[bold red]Unexpected error: {e}[/bold red]")
+
+
+
+@click.command(name="dry-run")
+@click.option("--force-sandbox", is_flag=True, help="Force full sandbox validation (Tier 3)")
+@click.option("--skip-sandbox", is_flag=True, help="Skip sandbox validation for faster iteration")
+@click.option("--verbose", "-v", is_flag=True, help="Show detailed output")
+def dry_run_command(force_sandbox, skip_sandbox, verbose):
+    """
+    Run 3-tier dry-run validation without deploying.
+
+    Validates your project through all three tiers without
+    actually deploying to infrastructure:
+
+    \b
+    [bold]Tier 1[/bold]: Local validation - Syntax, imports, config files
+    [bold]Tier 2[/bold]: Railpack plan - Generate build plan
+    [bold]Tier 3[/bold]: E2B Sandbox - Full build validation via Xenfra API
+
+    This is useful for:
+    - Validating changes before deploying
+    - CI/CD pipelines
+    - Checking for syntax errors quickly
+
+    Tier 3 runs on Xenfra's servers - no local setup needed.
+    """
+    from pathlib import Path
+    from ..blueprints import run_dry_run
+    from ..utils.file_sync import scan_project_files_cached, ensure_gitignore_ignored
+
+    project_path = Path(os.getcwd())
+
+    # Get authenticated client for Tier 3 sandbox
+    try:
+        client = get_client()
+    except click.Abort:
+        return
+
+    # Scan project files
+    if ensure_gitignore_ignored():
+        console.print("[dim] - Added .xenfra to .gitignore for privacy[/dim]")
+
+    console.print("[cyan]📁 Scanning project files...[/cyan]")
+    file_manifest = scan_project_files_cached()
+    console.print(f"[dim]Found {len(file_manifest)} files[/dim]")
+
+    if not file_manifest:
+        console.print("[bold red]Error: No files found to validate.[/bold red]")
+        raise click.Abort()
+
+    console.print(Panel(
+        "[bold]🔍 Running 3-Tier Dry-Run Validation[/bold]\n"
+        "Validating your project without deploying...",
+        title="[bold cyan]Dry Run[/bold cyan]",
+        border_style="cyan"
+    ))
+
+    # Run dry-run
+    with console.status("[cyan]Running validation...[/cyan]"):
+        result = run_dry_run(
+            project_path,
+            force_sandbox=force_sandbox,
+            skip_sandbox=skip_sandbox,
+            api_client=client,
+            file_manifest=file_manifest
+        )
+
+    # Display results
+    console.print(result.get_summary())
+    console.print()
+
+    # Detailed output if verbose
+    if verbose and result.tier2:
+        console.print("[bold]Build Plan Details:[/bold]")
+        console.print(f"  Framework: {result.tier2.framework}")
+        console.print(f"  Language: {result.tier2.language}")
+        console.print(f"  Package Manager: {result.tier2.package_manager}")
+        console.print(f"  Port: {result.tier2.port}")
+        if result.tier2.start_command:
+            console.print(f"  Start Command: {result.tier2.start_command}")
+        console.print()
+
+    # Final status
+    if result.success:
+        console.print("[bold green]✅ All validations passed - Ready to deploy![/bold green]")
+        console.print("[dim]Run 'xenfra deploy' to deploy to infrastructure.[/dim]")
+    else:
+        console.print("[bold red]❌ Validation failed - Fix errors before deploying[/bold red]")
+        raise click.Abort()
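Because dry-run is now a standalone click command, it can also be exercised from a test or CI script. A hypothetical smoke test using click's CliRunner, not part of the package; it assumes an already-authenticated environment and a valid project, since the command calls get_client() before validating:

    from click.testing import CliRunner

    from xenfra.commands.deployments import dry_run_command

    def test_dry_run_skip_sandbox():
        runner = CliRunner()
        # --skip-sandbox keeps the run to Tiers 1 and 2 for a fast pre-deploy check
        result = runner.invoke(dry_run_command, ["--skip-sandbox", "--verbose"])
        assert result.exit_code == 0, result.output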
xenfra/commands/intelligence.py
CHANGED
@@ -87,7 +87,7 @@ def init(manual, accept_all):
     For microservices projects (multiple services), generates xenfra-services.yaml.
 
     Use --manual to skip AI and configure interactively.
-    Set
+    Set XENFRA_AI=off environment variable to force manual mode globally.
     """
     # Check if config already exists
     if has_xenfra_config():
@@ -148,10 +148,10 @@ def init(manual, accept_all):
             console.print(f"[dim]Note: Microservices detection skipped: {e}[/dim]\n")
 
 
-    # Check for
-
-    if
-        console.print("[yellow]
+    # Check for XENFRA_AI environment variable
+    ai_mode = os.environ.get("XENFRA_AI", "on").lower()
+    if ai_mode == "off" and not manual:
+        console.print("[yellow]XENFRA_AI=off is set. Using manual mode.[/yellow]")
         manual = True
 
     # Manual mode - interactive prompts
xenfra/main.py
CHANGED
@@ -10,7 +10,7 @@ import click
 from rich.console import Console
 
 from .commands.auth import auth
-from .commands.deployments import delete, deploy, logs, report, status
+from .commands.deployments import delete, deploy, dry_run_command, logs, report, status
 from .commands.intelligence import analyze, diagnose, init
 from .commands.projects import projects
 from .commands.security_cmd import security
@@ -30,12 +30,14 @@ def cli():
     Quick Start:
         xenfra auth login      # Authenticate with Xenfra
         xenfra init            # Initialize your project (AI-powered)
+        xenfra dry-run         # Validate before deploying (3-tier)
         xenfra deploy          # Deploy to DigitalOcean
 
     Commands:
         auth        Authentication (login, logout, whoami)
         projects    Manage projects (list, show, delete)
        init        Smart project initialization (AI-powered)
+        dry-run     Validate project without deploying (3-tier)
        diagnose    Diagnose deployment failures (AI-powered)
        analyze     Analyze codebase without creating config
 
@@ -61,6 +63,7 @@ cli.add_command(analyze)
 
 # Register deployment commands at root level
 cli.add_command(deploy)
+cli.add_command(dry_run_command)
 cli.add_command(status)
 cli.add_command(logs)
 cli.add_command(report)