xenfra-sdk 0.2.1__tar.gz → 0.2.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/PKG-INFO +1 -1
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/pyproject.toml +1 -1
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/client.py +90 -87
- xenfra_sdk-0.2.2/src/xenfra_sdk/dockerizer.py +194 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/engine.py +190 -42
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/models.py +1 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/deployments.py +15 -2
- xenfra_sdk-0.2.2/src/xenfra_sdk/resources/files.py +101 -0
- xenfra_sdk-0.2.2/src/xenfra_sdk/templates/Dockerfile.j2 +38 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/templates/docker-compose.yml.j2 +5 -2
- xenfra_sdk-0.2.1/src/xenfra_sdk/dockerizer.py +0 -104
- xenfra_sdk-0.2.1/src/xenfra_sdk/templates/Dockerfile.j2 +0 -25
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/README.md +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/__init__.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/cli/__init__.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/cli/main.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/client_with_hooks.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/config.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/db/__init__.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/db/models.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/db/session.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/dependencies.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/exceptions.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/mcp_client.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/patterns.json +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/privacy.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/recipes.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/__init__.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/base.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/intelligence.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/projects.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/security.py +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/templates/cloud-init.sh.j2 +0 -0
- {xenfra_sdk-0.2.1 → xenfra_sdk-0.2.2}/src/xenfra_sdk/utils.py +0 -0
|
@@ -1,87 +1,90 @@
|
|
|
1
|
-
import os
|
|
2
|
-
|
|
3
|
-
import httpx
|
|
4
|
-
|
|
5
|
-
from .exceptions import AuthenticationError, XenfraAPIError, XenfraError
|
|
6
|
-
from .resources.deployments import DeploymentsManager
|
|
7
|
-
from .resources.
|
|
8
|
-
from .resources.
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
self.
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
self.
|
|
36
|
-
self.
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
#
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
)
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
raise
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
import httpx
|
|
4
|
+
|
|
5
|
+
from .exceptions import AuthenticationError, XenfraAPIError, XenfraError
|
|
6
|
+
from .resources.deployments import DeploymentsManager
|
|
7
|
+
from .resources.files import FilesManager
|
|
8
|
+
from .resources.intelligence import IntelligenceManager
|
|
9
|
+
from .resources.projects import ProjectsManager
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class XenfraClient:
    """Synchronous client for the Xenfra HTTP API.

    Wraps an ``httpx.Client`` and exposes resource managers as attributes
    (``projects``, ``deployments``, ``intelligence``, ``files``). Supports
    use as a context manager so the underlying connection pool is closed
    deterministically.
    """

    def __init__(self, token: str = None, api_url: str = None):
        """Create a client.

        Args:
            token: API token. Falls back to the ``XENFRA_TOKEN`` env var.
            api_url: Base API URL. Falls back to ``XENFRA_API_URL``, then
                the production endpoint.

        Raises:
            AuthenticationError: If no token is available from either source.
        """
        # Use provided URL, or fall back to env var, or default to production
        if api_url is None:
            api_url = os.getenv("XENFRA_API_URL", "https://api.xenfra.tech")

        self.api_url = api_url
        self._token = token or os.getenv("XENFRA_TOKEN")
        if not self._token:
            raise AuthenticationError(
                "No API token provided. Pass it to the client or set XENFRA_TOKEN."
            )

        self._http_client = httpx.Client(
            base_url=self.api_url,
            headers={"Authorization": f"Bearer {self._token}", "Content-Type": "application/json"},
            timeout=30.0,  # Add a reasonable timeout
        )

        # Track if client is closed
        self._closed = False

        # Initialize resource managers
        self.projects = ProjectsManager(self)
        self.deployments = DeploymentsManager(self)
        self.intelligence = IntelligenceManager(self)
        self.files = FilesManager(self)

    def _request(self, method: str, path: str, json: dict = None) -> httpx.Response:
        """Internal method to handle all HTTP requests.

        Raises:
            XenfraError: If the client is closed, or the request fails at the
                transport level (connection error, timeout, ...).
            XenfraAPIError: For 4xx/5xx responses, carrying the server's
                status code and a best-effort error detail.
        """
        if self._closed:
            raise XenfraError("Client is closed. Create a new client or use context manager.")

        try:
            response = self._http_client.request(method, path, json=json)
            response.raise_for_status()  # Raise HTTPStatusError for 4xx/5xx
            return response
        except httpx.HTTPStatusError as e:
            # Convert httpx error to our custom SDK error. The body may be
            # JSON, plain text, or empty, so parse it defensively.
            detail = self._extract_error_detail(e.response)
            raise XenfraAPIError(status_code=e.response.status_code, detail=detail) from e
        except httpx.RequestError as e:
            # Handle connection errors, timeouts, etc.
            # FIX: chain the original exception (`from e`) so the transport
            # failure is preserved in the traceback for debugging.
            raise XenfraError(f"HTTP request failed: {e}") from e

    @staticmethod
    def _extract_error_detail(response) -> str:
        """Best-effort extraction of a human-readable error detail.

        Prefers the JSON ``detail`` field; falls back to the first 500
        characters of the body, then to "Unknown error". Never raises.
        """
        try:
            fallback = response.text[:500] if response.text else "Unknown error"
            content_type = response.headers.get("content-type", "")
            if "application/json" in content_type:
                try:
                    return response.json().get("detail", fallback)
                except (ValueError, TypeError):
                    return fallback
            return fallback
        except Exception:
            return "Unknown error"

    def close(self):
        """Close the HTTP client and cleanup resources. Idempotent."""
        if not self._closed:
            self._http_client.close()
            self._closed = True

    def __enter__(self):
        """Context manager entry - allows 'with XenfraClient() as client:' usage."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - ensures cleanup."""
        self.close()
        return False  # Don't suppress exceptions

    def __del__(self):
        """Destructor - cleanup if not already closed."""
        # hasattr guard: __init__ may have raised before _closed was set.
        if hasattr(self, "_closed") and not self._closed:
            self.close()
|
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Xenfra Dockerizer - Generates deployment assets as strings.
|
|
3
|
+
|
|
4
|
+
This module renders Dockerfile and docker-compose.yml templates
|
|
5
|
+
to strings (in-memory) so they can be written to the target droplet via SSH.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Dict, Optional
|
|
10
|
+
import re
|
|
11
|
+
|
|
12
|
+
from jinja2 import Environment, FileSystemLoader
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def detect_python_version(file_manifest: list = None) -> str:
    """
    Determine the Docker base image from project version hints.

    Checks in order:
    1. A ``.python-version`` file (e.g., "3.13")
    2. The ``requires-python`` field in pyproject.toml (e.g., ">=3.13")

    Args:
        file_manifest: List of file info dicts with 'path' and optionally
            'content'. If None or empty, the 3.11 default is returned.

    Returns:
        Docker image version string (e.g., "python:3.13-slim")
    """
    fallback = "python:3.11-slim"

    if not file_manifest:
        return fallback

    # Index entries by path for O(1) lookups.
    by_path = {entry.get('path', ''): entry for entry in file_manifest}

    # Option 1: explicit pin in .python-version (first line wins)
    pin_entry = by_path.get('.python-version')
    if pin_entry:
        raw = pin_entry.get('content', '')
        if raw:
            first_line = raw.strip().split('\n')[0].strip()
            if first_line:
                # Reduce "3.13.1" to major.minor "3.13"
                hit = re.match(r'(\d+\.\d+)', first_line)
                if hit:
                    return f"python:{hit.group(1)}-slim"

    # Option 2: requires-python constraint in pyproject.toml
    toml_entry = by_path.get('pyproject.toml')
    if toml_entry:
        raw = toml_entry.get('content', '')
        if raw:
            # Matches requires-python = ">=3.13" or "^3.13"
            spec = re.search(r'requires-python\s*=\s*["\']([^"\']+)["\']', raw)
            if spec:
                # Pull the numeric major.minor out of the specifier
                hit = re.search(r'(\d+\.\d+)', spec.group(1))
                if hit:
                    return f"python:{hit.group(1)}-slim"

    return fallback
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def render_deployment_assets(context: dict) -> Dict[str, str]:
    """
    Renders deployment assets (Dockerfile, docker-compose.yml) using Jinja2 templates.

    IMPORTANT: This function returns strings, NOT files. The caller is responsible
    for writing these to the correct location (e.g., via SSH to a remote droplet).

    Args:
        context: A dictionary containing information for rendering templates.
            Required keys:
                - framework: str (fastapi, flask, django)
            Optional keys:
                - port: int (default 8000)
                - command: str (start command, auto-generated if not provided)
                - database: str (postgres, mysql, etc.)
                - package_manager: str (pip, uv)
                - dependency_file: str (requirements.txt, pyproject.toml)
                - python_version: str (default python:3.11-slim)

    Returns:
        Dict with keys "Dockerfile" and "docker-compose.yml", values are rendered content strings.
        Returns empty dict if no framework is provided.
    """
    # Path to the templates directory (shipped alongside this module)
    template_dir = Path(__file__).parent / "templates"
    env = Environment(loader=FileSystemLoader(template_dir))

    # Get framework from context (MUST be provided by caller, no auto-detection)
    framework = context.get("framework")
    if not framework:
        # Framework is required - caller should have validated this
        return {}

    # Get port with default (also covers an explicit port=None)
    port = context.get("port") or 8000

    # Generate default command based on framework if not provided
    command = context.get("command")
    if not command:
        if framework == "fastapi":
            command = f"uvicorn main:app --host 0.0.0.0 --port {port}"
        elif framework == "flask":
            command = f"gunicorn app:app -b 0.0.0.0:{port}"
        elif framework == "django":
            command = f"gunicorn app.wsgi:application --bind 0.0.0.0:{port}"
        else:
            command = f"uvicorn main:app --host 0.0.0.0 --port {port}"

    # Build render context. BUG FIX: spread **context FIRST so the normalized
    # values computed above take precedence. Previously **context was spread
    # last, so raw caller values (e.g. port=None, command=None) clobbered the
    # defaults and templates received None.
    render_context = {
        **context,  # Pass through any additional context
        "framework": framework,
        "port": port,
        "command": command,
        "database": context.get("database"),
        "package_manager": context.get("package_manager") or "pip",
        "dependency_file": context.get("dependency_file") or "requirements.txt",
        "python_version": context.get("python_version") or "python:3.11-slim",
    }

    result = {}

    # --- 1. Render Dockerfile ---
    dockerfile_template = env.get_template("Dockerfile.j2")
    result["Dockerfile"] = dockerfile_template.render(render_context)

    # --- 2. Render docker-compose.yml ---
    compose_template = env.get_template("docker-compose.yml.j2")
    result["docker-compose.yml"] = compose_template.render(render_context)

    return result
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
# Keep detect_framework for potential local CLI use (not used in remote deployment)
|
|
143
|
+
def detect_framework(path: str = ".") -> tuple:
    """
    Guess the web framework and entrypoint of a local Python project.

    NOTE: This is only useful when running LOCALLY on the user's machine.
    It should NOT be called when the engine runs on a remote server.

    Returns:
        (framework_name, default_port, start_command), or (None, None, None)
        when nothing recognizable is found.
    """
    import os

    root = Path(path).resolve()

    # Django keeps manage.py at the project root by convention.
    if (root / "manage.py").is_file():
        return "django", 8000, f"gunicorn {root.name}.wsgi:application --bind 0.0.0.0:8000"

    entry_names = ["main.py", "app.py"]

    # Collect candidate entrypoints: project root first, then src/* packages.
    candidates = [root / n for n in entry_names if (root / n).is_file()]
    for pkg_dir in root.glob("src/*"):
        if pkg_dir.is_dir():
            candidates.extend(
                pkg_dir / n for n in entry_names if (pkg_dir / n).is_file()
            )

    for candidate in candidates:
        try:
            with open(candidate, "r") as fh:
                source = fh.read()
        except Exception:
            # Unreadable file: move on to the next candidate.
            continue

        try:
            # Derive a dotted module path relative to the project root,
            # stripping the ".py" suffix and a leading "src." package prefix.
            module = str(candidate.relative_to(root)).replace(os.sep, '.')[:-3]
            if module.startswith("src."):
                module = module[4:]
        except ValueError:
            module = candidate.stem

        if "FastAPI" in source:
            return "fastapi", 8000, f"uvicorn {module}:app --host 0.0.0.0 --port 8000"

        if "Flask" in source:
            return "flask", 5000, f"gunicorn {module}:app -b 0.0.0.0:5000"

    return None, None, None
|
|
@@ -246,31 +246,77 @@ class InfraEngine:
|
|
|
246
246
|
db_session: Session = None,
|
|
247
247
|
port: int = 8000,
|
|
248
248
|
command: str = None,
|
|
249
|
+
entrypoint: str = None, # e.g., "todo.main:app"
|
|
249
250
|
database: str = None,
|
|
251
|
+
package_manager: str = None,
|
|
252
|
+
dependency_file: str = None,
|
|
253
|
+
file_manifest: list = None, # Delta upload: [{path, sha, size}, ...]
|
|
254
|
+
get_file_content: callable = None, # Function to get file content by SHA
|
|
250
255
|
**kwargs,
|
|
251
256
|
):
|
|
252
257
|
"""A stateful, blocking orchestrator for deploying a new server."""
|
|
253
258
|
droplet = None
|
|
254
259
|
session = db_session or self.db_session
|
|
260
|
+
branch = kwargs.get("branch", "main") # Extract branch from kwargs
|
|
261
|
+
framework = kwargs.get("framework") # Extract framework from kwargs
|
|
262
|
+
|
|
255
263
|
try:
|
|
264
|
+
# === 0. EARLY VALIDATION ===
|
|
265
|
+
# Check code source BEFORE creating droplet
|
|
266
|
+
has_code_source = repo_url or (file_manifest and get_file_content)
|
|
267
|
+
if os.getenv("XENFRA_SERVICE_MODE") == "true" and not has_code_source:
|
|
268
|
+
raise DeploymentError(
|
|
269
|
+
"No code source provided. Use git_repo URL or upload files first. "
|
|
270
|
+
"Local folder deployment is not supported via the cloud API.",
|
|
271
|
+
stage="Validation",
|
|
272
|
+
)
|
|
273
|
+
|
|
256
274
|
# === 1. SETUP STAGE ===
|
|
257
275
|
logger("\n[bold blue]PHASE 1: SETUP[/bold blue]")
|
|
258
276
|
ssh_key = self._ensure_ssh_key(logger)
|
|
259
277
|
|
|
260
278
|
# === 2. ASSET GENERATION STAGE ===
|
|
261
279
|
logger("\n[bold blue]PHASE 2: GENERATING DEPLOYMENT ASSETS[/bold blue]")
|
|
280
|
+
|
|
281
|
+
# Detect Python version from project files if using delta upload
|
|
282
|
+
python_version = "python:3.11-slim" # Default
|
|
283
|
+
if file_manifest and get_file_content:
|
|
284
|
+
# Build file info with content for version detection
|
|
285
|
+
version_files = []
|
|
286
|
+
for finfo in file_manifest:
|
|
287
|
+
path = finfo.get('path', '')
|
|
288
|
+
if path in ['.python-version', 'pyproject.toml']:
|
|
289
|
+
content = get_file_content(finfo.get('sha', ''))
|
|
290
|
+
if content:
|
|
291
|
+
version_files.append({
|
|
292
|
+
'path': path,
|
|
293
|
+
'content': content.decode('utf-8', errors='ignore')
|
|
294
|
+
})
|
|
295
|
+
|
|
296
|
+
if version_files:
|
|
297
|
+
python_version = dockerizer.detect_python_version(version_files)
|
|
298
|
+
logger(f" - Detected Python version: {python_version}")
|
|
299
|
+
|
|
262
300
|
context = {
|
|
263
301
|
"email": email,
|
|
264
302
|
"domain": domain,
|
|
265
303
|
"repo_url": repo_url,
|
|
266
|
-
"port": port,
|
|
304
|
+
"port": port or 8000,
|
|
267
305
|
"command": command,
|
|
306
|
+
"entrypoint": entrypoint, # Pass entrypoint to templates (e.g., "todo.main:app")
|
|
268
307
|
"database": database,
|
|
308
|
+
"package_manager": package_manager or "pip",
|
|
309
|
+
"dependency_file": dependency_file or "requirements.txt",
|
|
310
|
+
"framework": framework, # Explicitly include framework
|
|
311
|
+
"python_version": python_version, # Auto-detected or default
|
|
269
312
|
**kwargs, # Pass any additional config
|
|
270
313
|
}
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
314
|
+
# Render templates to strings (NOT written to disk)
|
|
315
|
+
rendered_assets = dockerizer.render_deployment_assets(context)
|
|
316
|
+
if not rendered_assets:
|
|
317
|
+
raise DeploymentError("Failed to render deployment assets. Is framework specified?", stage="Asset Generation")
|
|
318
|
+
for filename in rendered_assets:
|
|
319
|
+
logger(f" - Rendered {filename} ({len(rendered_assets[filename])} bytes)")
|
|
274
320
|
|
|
275
321
|
# === 3. CLOUD-INIT STAGE ===
|
|
276
322
|
logger("\n[bold blue]PHASE 3: CREATING SERVER SETUP SCRIPT[/bold blue]")
|
|
@@ -282,20 +328,31 @@ class InfraEngine:
|
|
|
282
328
|
|
|
283
329
|
# === 4. DROPLET CREATION STAGE ===
|
|
284
330
|
logger("\n[bold blue]PHASE 4: PROVISIONING SERVER[/bold blue]")
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
331
|
+
|
|
332
|
+
# Machine Reuse: Look for existing droplet with same name and 'xenfra' tag
|
|
333
|
+
existing_droplets = digitalocean.Manager(token=self.token).get_all_droplets(tag_name="xenfra")
|
|
334
|
+
droplet = next((d for d in existing_droplets if d.name == name), None)
|
|
335
|
+
|
|
336
|
+
if droplet and droplet.status == "active":
|
|
337
|
+
logger(f" - Found existing active droplet '{name}' (ID: {droplet.id}). Reusing machine...")
|
|
338
|
+
else:
|
|
339
|
+
if droplet:
|
|
340
|
+
logger(f" - Found existing droplet '{name}' but it's not active ({droplet.status}). Creating new one...")
|
|
341
|
+
|
|
342
|
+
droplet = digitalocean.Droplet(
|
|
343
|
+
token=self.token,
|
|
344
|
+
name=name,
|
|
345
|
+
region=region,
|
|
346
|
+
image=image,
|
|
347
|
+
size_slug=size,
|
|
348
|
+
ssh_keys=[ssh_key.id],
|
|
349
|
+
user_data=cloud_init_script,
|
|
350
|
+
tags=["xenfra"],
|
|
351
|
+
)
|
|
352
|
+
droplet.create()
|
|
353
|
+
logger(
|
|
354
|
+
f" - Droplet '{name}' creation initiated (ID: {droplet.id}). Waiting for it to become active..."
|
|
355
|
+
)
|
|
299
356
|
|
|
300
357
|
# === 5. POLLING STAGE ===
|
|
301
358
|
logger("\n[bold blue]PHASE 5: WAITING FOR SERVER SETUP[/bold blue]")
|
|
@@ -370,20 +427,77 @@ class InfraEngine:
|
|
|
370
427
|
# === 6. CODE UPLOAD STAGE ===
|
|
371
428
|
logger("\n[bold blue]PHASE 6: UPLOADING APPLICATION CODE[/bold blue]")
|
|
372
429
|
with self._get_connection(ip_address) as conn:
|
|
373
|
-
#
|
|
430
|
+
# Option 1: Git clone (if repo_url provided)
|
|
374
431
|
if repo_url:
|
|
375
|
-
logger(f" - Cloning repository from {repo_url}...")
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
if
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
432
|
+
logger(f" - Cloning repository from {repo_url} (branch: {branch})...")
|
|
433
|
+
# Use --branch to checkout specific branch, --single-branch for efficiency
|
|
434
|
+
clone_cmd = f"git clone --branch {branch} --single-branch {repo_url} /root/app"
|
|
435
|
+
result = conn.run(clone_cmd, warn=True, hide=True)
|
|
436
|
+
if result.failed:
|
|
437
|
+
# Try without --single-branch in case branch doesn't exist
|
|
438
|
+
# Clean up any partial clone first
|
|
439
|
+
logger(f" - Branch '{branch}' clone failed, trying default branch...")
|
|
440
|
+
conn.run("rm -rf /root/app", warn=True, hide=True)
|
|
441
|
+
conn.run(f"git clone {repo_url} /root/app")
|
|
442
|
+
|
|
443
|
+
# Option 2: Delta upload (if file_manifest provided)
|
|
444
|
+
elif file_manifest and get_file_content:
|
|
445
|
+
logger(f" - Syncing {len(file_manifest)} files via delta upload...")
|
|
446
|
+
|
|
447
|
+
# Ensure /root/app exists
|
|
448
|
+
conn.run("mkdir -p /root/app", hide=True)
|
|
449
|
+
|
|
450
|
+
for i, file_info in enumerate(file_manifest):
|
|
451
|
+
path = file_info['path']
|
|
452
|
+
sha = file_info['sha']
|
|
453
|
+
size = file_info.get('size', 0)
|
|
454
|
+
|
|
455
|
+
# Build Safety: Placeholder for 0-byte critical files
|
|
456
|
+
# (Hatchling/Pip fail if README.md or __init__.py are mentioned but empty)
|
|
457
|
+
is_critical_empty = (
|
|
458
|
+
size == 0 and
|
|
459
|
+
(path.lower() == 'readme.md' or path.endswith('__init__.py'))
|
|
384
460
|
)
|
|
461
|
+
|
|
462
|
+
# Smart Incremental Sync: Check if file exists and has same SHA
|
|
463
|
+
remote_path = f"/root/app/{path}"
|
|
464
|
+
check_sha_cmd = f"sha256sum {remote_path}"
|
|
465
|
+
result = conn.run(check_sha_cmd, warn=True, hide=True)
|
|
466
|
+
|
|
467
|
+
if result.ok:
|
|
468
|
+
remote_sha = result.stdout.split()[0]
|
|
469
|
+
if remote_sha == sha and not is_critical_empty:
|
|
470
|
+
# File already exists and matches, skip upload
|
|
471
|
+
continue
|
|
472
|
+
|
|
473
|
+
# Get file content from storage
|
|
474
|
+
content = get_file_content(sha)
|
|
475
|
+
if content is None:
|
|
476
|
+
raise DeploymentError(f"File not found in storage: {path} (sha: {sha})", stage="Code Upload")
|
|
477
|
+
|
|
478
|
+
# Apply placeholder if critical and empty
|
|
479
|
+
if is_critical_empty:
|
|
480
|
+
content = b"# xenfra placeholder\n"
|
|
481
|
+
logger(f" - [Zen Mode] Injected placeholder into empty {path}")
|
|
482
|
+
|
|
483
|
+
# Create directory if needed
|
|
484
|
+
dir_path = os.path.dirname(path)
|
|
485
|
+
if dir_path:
|
|
486
|
+
conn.run(f"mkdir -p /root/app/{dir_path}", warn=True, hide=True)
|
|
487
|
+
|
|
488
|
+
# Use SFTP for file transfer (handles large files)
|
|
489
|
+
from io import BytesIO
|
|
490
|
+
conn.put(BytesIO(content), remote_path)
|
|
491
|
+
|
|
492
|
+
# Progress update every 10 files
|
|
493
|
+
if (i + 1) % 10 == 0 or i == len(file_manifest) - 1:
|
|
494
|
+
logger(f" - Synced {i + 1}/{len(file_manifest)} files...")
|
|
385
495
|
|
|
386
|
-
|
|
496
|
+
logger(f" - All {len(file_manifest)} files synced.")
|
|
497
|
+
|
|
498
|
+
# Option 3: Local rsync (only works locally, not in service mode)
|
|
499
|
+
else:
|
|
500
|
+
# Note: Early validation in Phase 0 should have caught this for service mode
|
|
387
501
|
private_key_path = str(Path.home() / ".ssh" / "id_rsa")
|
|
388
502
|
rsync_cmd = f'rsync -avz --exclude=".git" --exclude=".venv" --exclude="__pycache__" -e "ssh -i {private_key_path} -o StrictHostKeyChecking=no" . root@{ip_address}:/root/app/'
|
|
389
503
|
logger(f" - Uploading local code via rsync...")
|
|
@@ -392,14 +506,34 @@ class InfraEngine:
|
|
|
392
506
|
raise DeploymentError(f"rsync failed: {result.stderr}", stage="Code Upload")
|
|
393
507
|
logger(" - Code upload complete.")
|
|
394
508
|
|
|
509
|
+
|
|
510
|
+
# === 6.5. WRITE DEPLOYMENT ASSETS TO DROPLET ===
|
|
511
|
+
logger("\n[bold blue]PHASE 6.5: WRITING DEPLOYMENT ASSETS[/bold blue]")
|
|
512
|
+
with self._get_connection(ip_address) as conn:
|
|
513
|
+
for filename, content in rendered_assets.items():
|
|
514
|
+
# Use heredoc with unique delimiter to write file content
|
|
515
|
+
# Single-quoted delimiter prevents shell variable expansion
|
|
516
|
+
logger(f" - Writing {filename}...")
|
|
517
|
+
try:
|
|
518
|
+
# Use base64 encoding to safely transfer file content
|
|
519
|
+
# Use printf to avoid issues with special characters
|
|
520
|
+
import base64
|
|
521
|
+
encoded_content = base64.b64encode(content.encode()).decode()
|
|
522
|
+
# Use printf with %s to handle any special characters in base64
|
|
523
|
+
conn.run(f"printf '%s' '{encoded_content}' | base64 -d > /root/app/{filename}")
|
|
524
|
+
except Exception as e:
|
|
525
|
+
raise DeploymentError(f"Failed to write {filename}: {e}", stage="Asset Write")
|
|
526
|
+
logger(" - Deployment assets written.")
|
|
527
|
+
|
|
395
528
|
# === 7. FINAL DEPLOY STAGE ===
|
|
396
529
|
if is_dockerized:
|
|
397
530
|
logger("\n[bold blue]PHASE 7: BUILDING AND DEPLOYING CONTAINERS[/bold blue]")
|
|
398
531
|
with self._get_connection(ip_address) as conn:
|
|
399
|
-
|
|
532
|
+
# Force --no-cache to ensure updated files (like README.md placeholders) are used
|
|
533
|
+
result = conn.run("cd /root/app && docker compose build --no-cache && docker compose up -d", hide=True)
|
|
400
534
|
if result.failed:
|
|
401
535
|
raise DeploymentError(f"docker-compose failed: {result.stderr}", stage="Deploy")
|
|
402
|
-
logger(" - Docker
|
|
536
|
+
logger(" - Docker build complete, containers starting...")
|
|
403
537
|
else:
|
|
404
538
|
logger("\n[bold blue]PHASE 7: STARTING HOST-BASED APPLICATION[/bold blue]")
|
|
405
539
|
start_command = context.get("command", f"uvicorn main:app --port {context.get('port', 8000)}")
|
|
@@ -411,6 +545,10 @@ class InfraEngine:
|
|
|
411
545
|
|
|
412
546
|
# === 8. VERIFICATION STAGE ===
|
|
413
547
|
logger("\n[bold blue]PHASE 8: VERIFYING DEPLOYMENT[/bold blue]")
|
|
548
|
+
|
|
549
|
+
# Give container a moment to initialize before first health check
|
|
550
|
+
time.sleep(5)
|
|
551
|
+
|
|
414
552
|
app_port = context.get("port", 8000)
|
|
415
553
|
for i in range(24): # 2-minute timeout for health checks
|
|
416
554
|
logger(f" - Health check attempt {i + 1}/24...")
|
|
@@ -418,7 +556,12 @@ class InfraEngine:
|
|
|
418
556
|
# Check if running
|
|
419
557
|
if is_dockerized:
|
|
420
558
|
ps_result = conn.run("cd /root/app && docker compose ps", hide=True)
|
|
421
|
-
|
|
559
|
+
ps_output = ps_result.stdout.lower()
|
|
560
|
+
# Docker Compose V1 shows "running", V2 shows "Up" in status
|
|
561
|
+
running = "running" in ps_output or " up " in ps_output
|
|
562
|
+
if "restarting" in ps_output:
|
|
563
|
+
logs = conn.run("cd /root/app && docker compose logs --tail 20", hide=True).stdout
|
|
564
|
+
raise DeploymentError(f"Application is crash-looping (restarting). Logs:\n{logs}", stage="Verification")
|
|
422
565
|
else:
|
|
423
566
|
ps_result = conn.run("ps aux | grep -v grep | grep python", hide=True)
|
|
424
567
|
running = ps_result.ok and len(ps_result.stdout.strip()) > 0
|
|
@@ -427,11 +570,19 @@ class InfraEngine:
|
|
|
427
570
|
time.sleep(5)
|
|
428
571
|
continue
|
|
429
572
|
|
|
430
|
-
# Check if application is responsive
|
|
431
|
-
|
|
432
|
-
|
|
573
|
+
# Check if application is responsive (port is listening)
|
|
574
|
+
# Accept ANY HTTP response (including 404) - it means the app is running
|
|
575
|
+
# Use curl with -w to get HTTP code, accept any response >= 100
|
|
576
|
+
port_check = conn.run(
|
|
577
|
+
f"curl -s -o /dev/null -w '%{{http_code}}' --connect-timeout 3 http://localhost:{app_port}/",
|
|
578
|
+
warn=True, hide=True
|
|
433
579
|
)
|
|
434
|
-
|
|
580
|
+
# curl may exit non-zero for 404, but still outputs HTTP code
|
|
581
|
+
http_code = port_check.stdout.strip()
|
|
582
|
+
|
|
583
|
+
# Any HTTP response (200, 404, 500, etc.) means app is running
|
|
584
|
+
if http_code.isdigit() and int(http_code) >= 100:
|
|
585
|
+
|
|
435
586
|
logger(
|
|
436
587
|
"[bold green] - Health check passed! Application is live.[/bold green]"
|
|
437
588
|
)
|
|
@@ -454,13 +605,10 @@ class InfraEngine:
|
|
|
454
605
|
return droplet # Return the full droplet object
|
|
455
606
|
time.sleep(5)
|
|
456
607
|
else:
|
|
457
|
-
#
|
|
608
|
+
# Capture logs on timeout failure
|
|
458
609
|
with self._get_connection(ip_address) as conn:
|
|
459
|
-
logs = conn.run("cd /root/app && docker compose logs", hide=True).stdout
|
|
460
|
-
raise DeploymentError(
|
|
461
|
-
f"Application failed to become healthy in time. Logs:\n{logs}",
|
|
462
|
-
stage="Verification",
|
|
463
|
-
)
|
|
610
|
+
logs = conn.run("cd /root/app && docker compose logs --tail 50", hide=True, warn=True).stdout if is_dockerized else ""
|
|
611
|
+
raise DeploymentError(f"Application failed to become healthy in time. Logs:\n{logs}", stage="Verification")
|
|
464
612
|
|
|
465
613
|
except Exception as e:
|
|
466
614
|
if droplet:
|
|
@@ -158,6 +158,7 @@ class CodebaseAnalysisResponse(BaseModel):
|
|
|
158
158
|
"""
|
|
159
159
|
|
|
160
160
|
framework: str = Field(..., description="Detected framework (fastapi, flask, django)")
|
|
161
|
+
entrypoint: str | None = Field(None, description="Application entrypoint (e.g., 'todo.main:app')")
|
|
161
162
|
port: int = Field(..., description="Detected application port")
|
|
162
163
|
database: str = Field(..., description="Detected database (postgresql, mysql, sqlite, none)")
|
|
163
164
|
cache: str | None = Field(None, description="Detected cache (redis, memcached, none)")
|
|
@@ -97,7 +97,7 @@ class DeploymentsManager(BaseManager):
|
|
|
97
97
|
except Exception as e:
|
|
98
98
|
raise XenfraError(f"Failed to get logs for deployment {deployment_id}: {e}")
|
|
99
99
|
|
|
100
|
-
def create_stream(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True, port: int = None, command: str = None, database: str = None) -> Iterator[dict]:
|
|
100
|
+
def create_stream(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True, port: int = None, command: str = None, entrypoint: str = None, database: str = None, package_manager: str = None, dependency_file: str = None, file_manifest: list = None) -> Iterator[dict]:
|
|
101
101
|
"""
|
|
102
102
|
Creates a new deployment with real-time SSE log streaming.
|
|
103
103
|
|
|
@@ -105,7 +105,7 @@ class DeploymentsManager(BaseManager):
|
|
|
105
105
|
|
|
106
106
|
Args:
|
|
107
107
|
project_name: Name of the project
|
|
108
|
-
git_repo: Git repository URL
|
|
108
|
+
git_repo: Git repository URL (optional if file_manifest provided)
|
|
109
109
|
branch: Git branch to deploy
|
|
110
110
|
framework: Framework type (fastapi, flask, django)
|
|
111
111
|
region: DigitalOcean region (optional)
|
|
@@ -113,7 +113,11 @@ class DeploymentsManager(BaseManager):
|
|
|
113
113
|
is_dockerized: Whether to use Docker (optional)
|
|
114
114
|
port: Application port (optional, default 8000)
|
|
115
115
|
command: Start command (optional, auto-detected if not provided)
|
|
116
|
+
entrypoint: Application entrypoint (optional, e.g. 'todo.main:app')
|
|
116
117
|
database: Database type (optional, e.g. 'postgres')
|
|
118
|
+
package_manager: Package manager (optional, e.g. 'pip', 'uv')
|
|
119
|
+
dependency_file: Dependency file (optional, e.g. 'requirements.txt')
|
|
120
|
+
file_manifest: List of files for delta upload [{path, sha, size}, ...]
|
|
117
121
|
|
|
118
122
|
Yields:
|
|
119
123
|
dict: SSE events with 'event' and 'data' fields
|
|
@@ -141,8 +145,17 @@ class DeploymentsManager(BaseManager):
|
|
|
141
145
|
payload["port"] = port
|
|
142
146
|
if command:
|
|
143
147
|
payload["command"] = command
|
|
148
|
+
if entrypoint:
|
|
149
|
+
payload["entrypoint"] = entrypoint
|
|
144
150
|
if database:
|
|
145
151
|
payload["database"] = database
|
|
152
|
+
if package_manager:
|
|
153
|
+
payload["package_manager"] = package_manager
|
|
154
|
+
if dependency_file:
|
|
155
|
+
payload["dependency_file"] = dependency_file
|
|
156
|
+
if file_manifest:
|
|
157
|
+
payload["file_manifest"] = file_manifest
|
|
158
|
+
|
|
146
159
|
|
|
147
160
|
try:
|
|
148
161
|
# Use httpx to stream the SSE response
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Files resource manager for delta uploads.
|
|
3
|
+
|
|
4
|
+
Provides methods to check file cache status and upload files to the server.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Dict, List
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class FilesManager:
    """Manager for file upload operations (delta-upload support)."""

    def __init__(self, client):
        """
        Initialize the FilesManager.

        Args:
            client: The XenfraClient instance used for API requests.
        """
        self._client = client

    def check(self, files: List[Dict]) -> Dict:
        """
        Check which files are missing from server cache.

        Args:
            files: List of file info dicts with keys: path, sha, size

        Returns:
            Dict with keys:
            - missing: List of SHA hashes that need to be uploaded
            - cached: Number of files already cached on server
        """
        # Forward only the fields the endpoint expects, even if callers
        # pass richer dicts (e.g. ones that also carry abs_path).
        manifest = [
            {"path": item["path"], "sha": item["sha"], "size": item["size"]}
            for item in files
        ]
        response = self._client._request("POST", "/files/check", json={"files": manifest})
        return response.json()

    def upload(self, content: bytes, sha: str, path: str) -> Dict:
        """
        Upload a single file to the server.

        Args:
            content: Raw file content as bytes
            sha: SHA256 hash of the content
            path: Relative file path

        Returns:
            Dict with keys: sha, size, stored
        """
        import httpx

        # File metadata travels in headers; the request body is the raw bytes.
        request_headers = {
            "Authorization": f"Bearer {self._client._token}",
            "Content-Type": "application/octet-stream",
            "X-Xenfra-Sha": sha,
            "X-Xenfra-Path": path,
        }

        result = httpx.post(
            f"{self._client.api_url}/files/upload",
            content=content,
            headers=request_headers,
            timeout=120.0,  # 2 minutes for large files
        )
        result.raise_for_status()
        return result.json()

    def upload_files(self, files: List[Dict], missing_shas: List[str], progress_callback=None) -> int:
        """
        Upload multiple files that are missing from the server.

        Args:
            files: List of file info dicts with keys: path, sha, size, abs_path
            missing_shas: List of SHA hashes that need to be uploaded
            progress_callback: Optional callback(uploaded_count, total_count)

        Returns:
            Number of files uploaded
        """
        wanted = set(missing_shas)
        pending = [item for item in files if item["sha"] in wanted]
        total = len(pending)

        done = 0
        for item in pending:
            with open(item["abs_path"], "rb") as handle:
                payload = handle.read()

            self.upload(payload, item["sha"], item["path"])
            done += 1

            if progress_callback:
                progress_callback(done, total)

        return done
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
# Dockerfile template for Python web applications
# Rendered with: python_version, package_manager, dependency_file, port
FROM {{ python_version | default('python:3.11-slim') }}

WORKDIR /app

{% if package_manager != 'pip' %}
# Install uv package manager and add to PATH
RUN apt-get update && apt-get install -y curl && \
    curl -LsSf https://astral.sh/uv/install.sh | sh && \
    apt-get remove -y curl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
ENV PATH="/root/.local/bin:/root/.cargo/bin:$PATH"
{% endif %}

{% if dependency_file == 'pyproject.toml' %}
# For pyproject.toml, copy all files first (hatchling needs README.md etc.)
COPY . .

# Install dependencies
{% if package_manager == 'pip' %}
# Fix: uv is only installed when package_manager != 'pip', so the pip +
# pyproject.toml combination must install with pip, not uv.
RUN pip install --no-cache-dir .
{% else %}
RUN uv pip install --system --no-cache .
{% endif %}
{% else %}
COPY {{ dependency_file | default('requirements.txt') }} .

# Install dependencies
{% if package_manager == 'pip' %}
RUN pip install --no-cache-dir -r {{ dependency_file | default('requirements.txt') }}
{% else %}
RUN uv pip install --system --no-cache -r {{ dependency_file | default('requirements.txt') }}
{% endif %}

COPY . .
{% endif %}

# Expose the application port
EXPOSE {{ port | default(8000) }}

# The command to run the application will be in docker-compose.yml
|
|
@@ -1,12 +1,15 @@
|
|
|
1
1
|
# docker-compose.yml template
|
|
2
|
-
version: '3.8'
|
|
3
2
|
|
|
4
3
|
services:
|
|
5
4
|
app:
|
|
6
5
|
build: .
|
|
7
6
|
ports:
|
|
8
7
|
- "{{ port | default(8000) }}:{{ port | default(8000) }}"
|
|
9
|
-
|
|
8
|
+
{% if command and command != 'None' %}
|
|
9
|
+
command: {{ command }}
|
|
10
|
+
{% else %}
|
|
11
|
+
command: ["uvicorn", "{{ entrypoint | default('src.main:app') }}", "--host", "0.0.0.0", "--port", "{{ port | default(8000) }}"]
|
|
12
|
+
{% endif %}
|
|
10
13
|
{% if database == 'postgres' or database == 'postgresql' %}
|
|
11
14
|
depends_on:
|
|
12
15
|
- db
|
|
@@ -1,104 +0,0 @@
|
|
|
1
|
-
import os
|
|
2
|
-
from pathlib import Path
|
|
3
|
-
|
|
4
|
-
from jinja2 import Environment, FileSystemLoader
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
def detect_framework(path="."):
    """
    Scan common Python project structures to guess the framework and entrypoint.

    Detection order: Django (``manage.py`` in the project root), then
    FastAPI/Flask entrypoints (``main.py``/``app.py``) in the root and in
    ``src/<package>/`` layouts.

    Args:
        path: Project directory to inspect (defaults to the current directory).

    Returns:
        Tuple ``(framework_name, default_port, start_command)``, or
        ``(None, None, None)`` when no framework is recognized.
    """
    project_root = Path(path).resolve()

    # Check for Django first (common pattern: manage.py in root)
    if (project_root / "manage.py").is_file():
        # Assume the project name is the current directory name
        project_name = project_root.name
        return "django", 8000, f"gunicorn {project_name}.wsgi:application --bind 0.0.0.0:8000"

    candidate_files = []

    # Check directly in project root
    for name in ["main.py", "app.py"]:
        if (project_root / name).is_file():
            candidate_files.append(project_root / name)

    # Check in src/*/ (standard package layout)
    for src_dir in project_root.glob("src/*"):
        if src_dir.is_dir():
            for name in ["main.py", "app.py"]:
                if (src_dir / name).is_file():
                    candidate_files.append(src_dir / name)

    for file_path in candidate_files:
        # Fix: read as UTF-8 and tolerate undecodable bytes so a single odd
        # file cannot crash detection (previously used the locale encoding).
        with open(file_path, "r", encoding="utf-8", errors="replace") as f:
            content = f.read()

        try:
            # src/testdeploy/main.py -> src.testdeploy.main (drop the ".py")
            module_name = str(file_path.relative_to(project_root)).replace(os.sep, '.')[:-3]
            if module_name.startswith("src."):
                # Strip the "src." prefix for gunicorn/uvicorn
                module_name = module_name[4:]
        except ValueError:
            module_name = file_path.stem

        if "FastAPI" in content:
            # Use standard :app convention
            return "fastapi", 8000, f"uvicorn {module_name}:app --host 0.0.0.0 --port 8000"

        if "Flask" in content:
            return "flask", 5000, f"gunicorn {module_name}:app -b 0.0.0.0:5000"

    return None, None, None
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
def generate_templated_assets(context: dict):
    """
    Generates deployment assets (Dockerfile, docker-compose.yml) using Jinja2 templates.

    Args:
        context: A dictionary containing information for rendering templates,
                 e.g., {'database': 'postgres', 'python_version': 'python:3.11-slim'}

    Returns:
        List of file names written to the current directory (empty when no
        framework could be determined).
    """
    # Templates ship inside the package, next to this module.
    templates_path = Path(__file__).parent / "templates"
    jinja_env = Environment(loader=FileSystemLoader(templates_path))

    # Auto-detect framework specifics; explicit context values win.
    auto_framework, auto_port, auto_command = detect_framework()

    framework = context.get("framework") or auto_framework
    port = context.get("port") or auto_port
    command = context.get("command") or auto_command

    if not framework:
        print("Warning: No recognizable web framework detected and no framework provided in context.")
        return []

    # Context is spread last so caller-supplied keys take precedence.
    render_context = {
        "framework": framework,
        "port": port,
        "command": command,
        **context,
    }

    written = []
    # Each template renders to a fixed output file in the working directory.
    for template_name, output_name in (
        ("Dockerfile.j2", "Dockerfile"),
        ("docker-compose.yml.j2", "docker-compose.yml"),
    ):
        rendered = jinja_env.get_template(template_name).render(render_context)
        with open(output_name, "w") as out:
            out.write(rendered)
        written.append(output_name)

    return written
|
|
@@ -1,25 +0,0 @@
|
|
|
1
|
-
# Dockerfile template for Python web applications
# (legacy 0.2.1 template: always installs uv and assumes requirements.txt)
FROM {{ python_version | default('python:3.11-slim') }}

WORKDIR /app

# Install uv, our preferred package manager
RUN apt-get update && apt-get install -y curl && \
    curl -LsSf https://astral.sh/uv/install.sh | sh && \
    apt-get remove -y curl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

COPY requirements.txt .

# Install dependencies
# NOTE(review): newer uv installers place the binary under /root/.local/bin —
# confirm /root/.cargo/bin is still where the installer puts it here.
RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt

COPY . .

# Expose the application port
EXPOSE {{ port | default(8000) }}

# The command to run the application will be in docker-compose.yml
# This allows for more flexibility
|
|
25
|
-
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|