xenfra-sdk 0.2.0__tar.gz → 0.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/PKG-INFO +1 -1
  2. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/pyproject.toml +1 -1
  3. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/client.py +90 -87
  4. xenfra_sdk-0.2.2/src/xenfra_sdk/dockerizer.py +194 -0
  5. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/engine.py +230 -58
  6. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/models.py +1 -0
  7. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/deployments.py +26 -6
  8. xenfra_sdk-0.2.2/src/xenfra_sdk/resources/files.py +101 -0
  9. xenfra_sdk-0.2.2/src/xenfra_sdk/templates/Dockerfile.j2 +38 -0
  10. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/templates/docker-compose.yml.j2 +6 -9
  11. xenfra_sdk-0.2.0/src/xenfra_sdk/dockerizer.py +0 -104
  12. xenfra_sdk-0.2.0/src/xenfra_sdk/templates/Dockerfile.j2 +0 -25
  13. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/README.md +0 -0
  14. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/__init__.py +0 -0
  15. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/cli/__init__.py +0 -0
  16. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/cli/main.py +0 -0
  17. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/client_with_hooks.py +0 -0
  18. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/config.py +0 -0
  19. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/db/__init__.py +0 -0
  20. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/db/models.py +0 -0
  21. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/db/session.py +0 -0
  22. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/dependencies.py +0 -0
  23. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/exceptions.py +0 -0
  24. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/mcp_client.py +0 -0
  25. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/patterns.json +0 -0
  26. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/privacy.py +0 -0
  27. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/recipes.py +0 -0
  28. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/__init__.py +0 -0
  29. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/base.py +0 -0
  30. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/intelligence.py +0 -0
  31. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/resources/projects.py +0 -0
  32. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/security.py +0 -0
  33. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/templates/cloud-init.sh.j2 +0 -0
  34. {xenfra_sdk-0.2.0 → xenfra_sdk-0.2.2}/src/xenfra_sdk/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: xenfra-sdk
3
- Version: 0.2.0
3
+ Version: 0.2.2
4
4
  Summary: Xenfra SDK: Core engine and utilities for the Xenfra platform.
5
5
  Author: xenfra-cloud
6
6
  Author-email: xenfra-cloud <xenfracloud@gmail.com>
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "xenfra-sdk"
3
- version = "0.2.0"
3
+ version = "0.2.2"
4
4
  description = "Xenfra SDK: Core engine and utilities for the Xenfra platform."
5
5
  readme = "README.md"
6
6
  authors = [
@@ -1,87 +1,90 @@
1
- import os
2
-
3
- import httpx
4
-
5
- from .exceptions import AuthenticationError, XenfraAPIError, XenfraError
6
- from .resources.deployments import DeploymentsManager
7
- from .resources.intelligence import IntelligenceManager
8
- from .resources.projects import ProjectsManager
9
-
10
-
11
- class XenfraClient:
12
- def __init__(self, token: str = None, api_url: str = None):
13
- # Use provided URL, or fall back to env var, or default to production
14
- if api_url is None:
15
- api_url = os.getenv("XENFRA_API_URL", "https://api.xenfra.tech")
16
-
17
- self.api_url = api_url
18
- self._token = token or os.getenv("XENFRA_TOKEN")
19
- if not self._token:
20
- raise AuthenticationError(
21
- "No API token provided. Pass it to the client or set XENFRA_TOKEN."
22
- )
23
-
24
- self._http_client = httpx.Client(
25
- base_url=self.api_url,
26
- headers={"Authorization": f"Bearer {self._token}", "Content-Type": "application/json"},
27
- timeout=30.0, # Add a reasonable timeout
28
- )
29
-
30
- # Track if client is closed
31
- self._closed = False
32
-
33
- # Initialize resource managers
34
- self.projects = ProjectsManager(self)
35
- self.deployments = DeploymentsManager(self)
36
- self.intelligence = IntelligenceManager(self)
37
-
38
- def _request(self, method: str, path: str, json: dict = None) -> httpx.Response:
39
- """Internal method to handle all HTTP requests."""
40
- if self._closed:
41
- raise XenfraError("Client is closed. Create a new client or use context manager.")
42
-
43
- try:
44
- response = self._http_client.request(method, path, json=json)
45
- response.raise_for_status() # Raise HTTPStatusError for 4xx/5xx
46
- return response
47
- except httpx.HTTPStatusError as e:
48
- # Convert httpx error to our custom SDK error
49
- # Safe JSON parsing with fallback
50
- try:
51
- content_type = e.response.headers.get("content-type", "")
52
- if "application/json" in content_type:
53
- try:
54
- error_data = e.response.json()
55
- detail = error_data.get(
56
- "detail", e.response.text[:500] if e.response.text else "Unknown error"
57
- )
58
- except (ValueError, TypeError):
59
- detail = e.response.text[:500] if e.response.text else "Unknown error"
60
- else:
61
- detail = e.response.text[:500] if e.response.text else "Unknown error"
62
- except Exception:
63
- detail = "Unknown error"
64
- raise XenfraAPIError(status_code=e.response.status_code, detail=detail) from e
65
- except httpx.RequestError as e:
66
- # Handle connection errors, timeouts, etc.
67
- raise XenfraError(f"HTTP request failed: {e}")
68
-
69
- def close(self):
70
- """Close the HTTP client and cleanup resources."""
71
- if not self._closed:
72
- self._http_client.close()
73
- self._closed = True
74
-
75
- def __enter__(self):
76
- """Context manager entry - allows 'with XenfraClient() as client:' usage."""
77
- return self
78
-
79
- def __exit__(self, exc_type, exc_val, exc_tb):
80
- """Context manager exit - ensures cleanup."""
81
- self.close()
82
- return False # Don't suppress exceptions
83
-
84
- def __del__(self):
85
- """Destructor - cleanup if not already closed."""
86
- if hasattr(self, "_closed") and not self._closed:
87
- self.close()
1
+ import os
2
+
3
+ import httpx
4
+
5
+ from .exceptions import AuthenticationError, XenfraAPIError, XenfraError
6
+ from .resources.deployments import DeploymentsManager
7
+ from .resources.files import FilesManager
8
+ from .resources.intelligence import IntelligenceManager
9
+ from .resources.projects import ProjectsManager
10
+
11
+
12
+ class XenfraClient:
13
+ def __init__(self, token: str = None, api_url: str = None):
14
+ # Use provided URL, or fall back to env var, or default to production
15
+ if api_url is None:
16
+ api_url = os.getenv("XENFRA_API_URL", "https://api.xenfra.tech")
17
+
18
+ self.api_url = api_url
19
+ self._token = token or os.getenv("XENFRA_TOKEN")
20
+ if not self._token:
21
+ raise AuthenticationError(
22
+ "No API token provided. Pass it to the client or set XENFRA_TOKEN."
23
+ )
24
+
25
+ self._http_client = httpx.Client(
26
+ base_url=self.api_url,
27
+ headers={"Authorization": f"Bearer {self._token}", "Content-Type": "application/json"},
28
+ timeout=30.0, # Add a reasonable timeout
29
+ )
30
+
31
+ # Track if client is closed
32
+ self._closed = False
33
+
34
+ # Initialize resource managers
35
+ self.projects = ProjectsManager(self)
36
+ self.deployments = DeploymentsManager(self)
37
+ self.intelligence = IntelligenceManager(self)
38
+ self.files = FilesManager(self)
39
+
40
+
41
+ def _request(self, method: str, path: str, json: dict = None) -> httpx.Response:
42
+ """Internal method to handle all HTTP requests."""
43
+ if self._closed:
44
+ raise XenfraError("Client is closed. Create a new client or use context manager.")
45
+
46
+ try:
47
+ response = self._http_client.request(method, path, json=json)
48
+ response.raise_for_status() # Raise HTTPStatusError for 4xx/5xx
49
+ return response
50
+ except httpx.HTTPStatusError as e:
51
+ # Convert httpx error to our custom SDK error
52
+ # Safe JSON parsing with fallback
53
+ try:
54
+ content_type = e.response.headers.get("content-type", "")
55
+ if "application/json" in content_type:
56
+ try:
57
+ error_data = e.response.json()
58
+ detail = error_data.get(
59
+ "detail", e.response.text[:500] if e.response.text else "Unknown error"
60
+ )
61
+ except (ValueError, TypeError):
62
+ detail = e.response.text[:500] if e.response.text else "Unknown error"
63
+ else:
64
+ detail = e.response.text[:500] if e.response.text else "Unknown error"
65
+ except Exception:
66
+ detail = "Unknown error"
67
+ raise XenfraAPIError(status_code=e.response.status_code, detail=detail) from e
68
+ except httpx.RequestError as e:
69
+ # Handle connection errors, timeouts, etc.
70
+ raise XenfraError(f"HTTP request failed: {e}")
71
+
72
+ def close(self):
73
+ """Close the HTTP client and cleanup resources."""
74
+ if not self._closed:
75
+ self._http_client.close()
76
+ self._closed = True
77
+
78
+ def __enter__(self):
79
+ """Context manager entry - allows 'with XenfraClient() as client:' usage."""
80
+ return self
81
+
82
+ def __exit__(self, exc_type, exc_val, exc_tb):
83
+ """Context manager exit - ensures cleanup."""
84
+ self.close()
85
+ return False # Don't suppress exceptions
86
+
87
+ def __del__(self):
88
+ """Destructor - cleanup if not already closed."""
89
+ if hasattr(self, "_closed") and not self._closed:
90
+ self.close()
@@ -0,0 +1,194 @@
1
+ """
2
+ Xenfra Dockerizer - Generates deployment assets as strings.
3
+
4
+ This module renders Dockerfile and docker-compose.yml templates
5
+ to strings (in-memory) so they can be written to the target droplet via SSH.
6
+ """
7
+
8
+ from pathlib import Path
9
+ from typing import Dict, Optional
10
+ import re
11
+
12
+ from jinja2 import Environment, FileSystemLoader
13
+
14
+
15
+ def detect_python_version(file_manifest: list = None) -> str:
16
+ """
17
+ Detect Python version from project files.
18
+
19
+ Checks in order:
20
+ 1. .python-version file (e.g., "3.13")
21
+ 2. pyproject.toml requires-python field (e.g., ">=3.13")
22
+
23
+ Args:
24
+ file_manifest: List of file info dicts with 'path' and optionally 'content'
25
+ If None, uses 3.11 as default.
26
+
27
+ Returns:
28
+ Docker image version string (e.g., "python:3.13-slim")
29
+ """
30
+ default_version = "python:3.11-slim"
31
+
32
+ if not file_manifest:
33
+ return default_version
34
+
35
+ # Build a lookup dict for quick access
36
+ file_lookup = {f.get('path', ''): f for f in file_manifest}
37
+
38
+ # Option 1: Check .python-version file
39
+ if '.python-version' in file_lookup:
40
+ file_info = file_lookup['.python-version']
41
+ content = file_info.get('content', '')
42
+ if content:
43
+ # Parse version like "3.13" or "3.13.1"
44
+ version = content.strip().split('\n')[0].strip()
45
+ if version:
46
+ # Extract major.minor (e.g., "3.13" from "3.13.1")
47
+ match = re.match(r'(\d+\.\d+)', version)
48
+ if match:
49
+ return f"python:{match.group(1)}-slim"
50
+
51
+ # Option 2: Check pyproject.toml requires-python
52
+ if 'pyproject.toml' in file_lookup:
53
+ file_info = file_lookup['pyproject.toml']
54
+ content = file_info.get('content', '')
55
+ if content:
56
+ # Parse requires-python = ">=3.13" or "^3.13"
57
+ match = re.search(r'requires-python\s*=\s*["\']([^"\']+)["\']', content)
58
+ if match:
59
+ version_spec = match.group(1)
60
+ # Extract version number (e.g., "3.13" from ">=3.13")
61
+ version_match = re.search(r'(\d+\.\d+)', version_spec)
62
+ if version_match:
63
+ return f"python:{version_match.group(1)}-slim"
64
+
65
+ return default_version
66
+
67
+
68
+ def render_deployment_assets(context: dict) -> Dict[str, str]:
69
+ """
70
+ Renders deployment assets (Dockerfile, docker-compose.yml) using Jinja2 templates.
71
+
72
+ IMPORTANT: This function returns strings, NOT files. The caller is responsible
73
+ for writing these to the correct location (e.g., via SSH to a remote droplet).
74
+
75
+ Args:
76
+ context: A dictionary containing information for rendering templates.
77
+ Required keys:
78
+ - framework: str (fastapi, flask, django)
79
+ - port: int (default 8000)
80
+ Optional keys:
81
+ - command: str (start command, auto-generated if not provided)
82
+ - database: str (postgres, mysql, etc.)
83
+ - package_manager: str (pip, uv)
84
+ - dependency_file: str (requirements.txt, pyproject.toml)
85
+ - python_version: str (default python:3.11-slim)
86
+
87
+ Returns:
88
+ Dict with keys "Dockerfile" and "docker-compose.yml", values are rendered content strings.
89
+ Returns empty dict if no framework is provided.
90
+ """
91
+ # Path to the templates directory
92
+ template_dir = Path(__file__).parent / "templates"
93
+ env = Environment(loader=FileSystemLoader(template_dir))
94
+
95
+ # Get framework from context (MUST be provided by caller, no auto-detection)
96
+ framework = context.get("framework")
97
+ if not framework:
98
+ # Framework is required - caller should have validated this
99
+ return {}
100
+
101
+ # Get port with default
102
+ port = context.get("port") or 8000
103
+
104
+ # Generate default command based on framework if not provided
105
+ command = context.get("command")
106
+ if not command:
107
+ if framework == "fastapi":
108
+ command = f"uvicorn main:app --host 0.0.0.0 --port {port}"
109
+ elif framework == "flask":
110
+ command = f"gunicorn app:app -b 0.0.0.0:{port}"
111
+ elif framework == "django":
112
+ command = f"gunicorn app.wsgi:application --bind 0.0.0.0:{port}"
113
+ else:
114
+ command = f"uvicorn main:app --host 0.0.0.0 --port {port}"
115
+
116
+ # Build render context with all values
117
+ render_context = {
118
+ "framework": framework,
119
+ "port": port,
120
+ "command": command,
121
+ "database": context.get("database"),
122
+ "package_manager": context.get("package_manager", "pip"),
123
+ "dependency_file": context.get("dependency_file", "requirements.txt"),
124
+ "python_version": context.get("python_version", "python:3.11-slim"),
125
+ # Pass through any additional context
126
+ **context,
127
+ }
128
+
129
+ result = {}
130
+
131
+ # --- 1. Render Dockerfile ---
132
+ dockerfile_template = env.get_template("Dockerfile.j2")
133
+ result["Dockerfile"] = dockerfile_template.render(render_context)
134
+
135
+ # --- 2. Render docker-compose.yml ---
136
+ compose_template = env.get_template("docker-compose.yml.j2")
137
+ result["docker-compose.yml"] = compose_template.render(render_context)
138
+
139
+ return result
140
+
141
+
142
+ # Keep detect_framework for potential local CLI use (not used in remote deployment)
143
+ def detect_framework(path: str = ".") -> tuple:
144
+ """
145
+ Scans common Python project structures to guess the framework and entrypoint.
146
+
147
+ NOTE: This is only useful when running LOCALLY on the user's machine.
148
+ It should NOT be called when the engine runs on a remote server.
149
+
150
+ Returns: (framework_name, default_port, start_command) or (None, None, None)
151
+ """
152
+ project_root = Path(path).resolve()
153
+
154
+ # Check for Django first (common pattern: manage.py in root)
155
+ if (project_root / "manage.py").is_file():
156
+ project_name = project_root.name
157
+ return "django", 8000, f"gunicorn {project_name}.wsgi:application --bind 0.0.0.0:8000"
158
+
159
+ candidate_files = []
160
+
161
+ # Check directly in project root
162
+ for name in ["main.py", "app.py"]:
163
+ if (project_root / name).is_file():
164
+ candidate_files.append(project_root / name)
165
+
166
+ # Check in src/*/ (standard package layout)
167
+ for src_dir in project_root.glob("src/*"):
168
+ if src_dir.is_dir():
169
+ for name in ["main.py", "app.py"]:
170
+ if (src_dir / name).is_file():
171
+ candidate_files.append(src_dir / name)
172
+
173
+ import os
174
+ for file_path in candidate_files:
175
+ try:
176
+ with open(file_path, "r") as f:
177
+ content = f.read()
178
+ except Exception:
179
+ continue
180
+
181
+ try:
182
+ module_name = str(file_path.relative_to(project_root)).replace(os.sep, '.')[:-3]
183
+ if module_name.startswith("src."):
184
+ module_name = module_name[4:]
185
+ except ValueError:
186
+ module_name = file_path.stem
187
+
188
+ if "FastAPI" in content:
189
+ return "fastapi", 8000, f"uvicorn {module_name}:app --host 0.0.0.0 --port 8000"
190
+
191
+ if "Flask" in content:
192
+ return "flask", 5000, f"gunicorn {module_name}:app -b 0.0.0.0:5000"
193
+
194
+ return None, None, None
@@ -162,7 +162,7 @@ class InfraEngine:
162
162
  # 3. Stream logs
163
163
  ip_address = droplet.ip_address
164
164
  with self._get_connection(ip_address) as conn:
165
- conn.run("cd /root/app && docker-compose logs -f app", pty=True)
165
+ conn.run("cd /root/app && docker compose logs -f app", pty=True)
166
166
 
167
167
  def get_account_balance(self) -> dict:
168
168
  """
@@ -244,27 +244,79 @@ class InfraEngine:
244
244
  repo_url: Optional[str] = None,
245
245
  is_dockerized: bool = True,
246
246
  db_session: Session = None,
247
+ port: int = 8000,
248
+ command: str = None,
249
+ entrypoint: str = None, # e.g., "todo.main:app"
250
+ database: str = None,
251
+ package_manager: str = None,
252
+ dependency_file: str = None,
253
+ file_manifest: list = None, # Delta upload: [{path, sha, size}, ...]
254
+ get_file_content: callable = None, # Function to get file content by SHA
247
255
  **kwargs,
248
256
  ):
249
257
  """A stateful, blocking orchestrator for deploying a new server."""
250
258
  droplet = None
251
259
  session = db_session or self.db_session
260
+ branch = kwargs.get("branch", "main") # Extract branch from kwargs
261
+ framework = kwargs.get("framework") # Extract framework from kwargs
262
+
252
263
  try:
264
+ # === 0. EARLY VALIDATION ===
265
+ # Check code source BEFORE creating droplet
266
+ has_code_source = repo_url or (file_manifest and get_file_content)
267
+ if os.getenv("XENFRA_SERVICE_MODE") == "true" and not has_code_source:
268
+ raise DeploymentError(
269
+ "No code source provided. Use git_repo URL or upload files first. "
270
+ "Local folder deployment is not supported via the cloud API.",
271
+ stage="Validation",
272
+ )
273
+
253
274
  # === 1. SETUP STAGE ===
254
275
  logger("\n[bold blue]PHASE 1: SETUP[/bold blue]")
255
276
  ssh_key = self._ensure_ssh_key(logger)
256
277
 
257
278
  # === 2. ASSET GENERATION STAGE ===
258
279
  logger("\n[bold blue]PHASE 2: GENERATING DEPLOYMENT ASSETS[/bold blue]")
280
+
281
+ # Detect Python version from project files if using delta upload
282
+ python_version = "python:3.11-slim" # Default
283
+ if file_manifest and get_file_content:
284
+ # Build file info with content for version detection
285
+ version_files = []
286
+ for finfo in file_manifest:
287
+ path = finfo.get('path', '')
288
+ if path in ['.python-version', 'pyproject.toml']:
289
+ content = get_file_content(finfo.get('sha', ''))
290
+ if content:
291
+ version_files.append({
292
+ 'path': path,
293
+ 'content': content.decode('utf-8', errors='ignore')
294
+ })
295
+
296
+ if version_files:
297
+ python_version = dockerizer.detect_python_version(version_files)
298
+ logger(f" - Detected Python version: {python_version}")
299
+
259
300
  context = {
260
301
  "email": email,
261
302
  "domain": domain,
262
303
  "repo_url": repo_url,
263
- **kwargs, # Pass db config, etc.
304
+ "port": port or 8000,
305
+ "command": command,
306
+ "entrypoint": entrypoint, # Pass entrypoint to templates (e.g., "todo.main:app")
307
+ "database": database,
308
+ "package_manager": package_manager or "pip",
309
+ "dependency_file": dependency_file or "requirements.txt",
310
+ "framework": framework, # Explicitly include framework
311
+ "python_version": python_version, # Auto-detected or default
312
+ **kwargs, # Pass any additional config
264
313
  }
265
- files = dockerizer.generate_templated_assets(context)
266
- for file in files:
267
- logger(f" - Generated {file}")
314
+ # Render templates to strings (NOT written to disk)
315
+ rendered_assets = dockerizer.render_deployment_assets(context)
316
+ if not rendered_assets:
317
+ raise DeploymentError("Failed to render deployment assets. Is framework specified?", stage="Asset Generation")
318
+ for filename in rendered_assets:
319
+ logger(f"   - Rendered {filename} ({len(rendered_assets[filename])} bytes)")
268
320
 
269
321
  # === 3. CLOUD-INIT STAGE ===
270
322
  logger("\n[bold blue]PHASE 3: CREATING SERVER SETUP SCRIPT[/bold blue]")
@@ -276,20 +328,31 @@ class InfraEngine:
276
328
 
277
329
  # === 4. DROPLET CREATION STAGE ===
278
330
  logger("\n[bold blue]PHASE 4: PROVISIONING SERVER[/bold blue]")
279
- droplet = digitalocean.Droplet(
280
- token=self.token,
281
- name=name,
282
- region=region,
283
- image=image,
284
- size_slug=size,
285
- ssh_keys=[ssh_key],
286
- userdata=cloud_init_script,
287
- tags=["xenfra"],
288
- )
289
- droplet.create()
290
- logger(
291
- f" - Droplet '{name}' creation initiated (ID: {droplet.id}). Waiting for it to become active..."
292
- )
331
+
332
+ # Machine Reuse: Look for existing droplet with same name and 'xenfra' tag
333
+ existing_droplets = digitalocean.Manager(token=self.token).get_all_droplets(tag_name="xenfra")
334
+ droplet = next((d for d in existing_droplets if d.name == name), None)
335
+
336
+ if droplet and droplet.status == "active":
337
+ logger(f" - Found existing active droplet '{name}' (ID: {droplet.id}). Reusing machine...")
338
+ else:
339
+ if droplet:
340
+ logger(f" - Found existing droplet '{name}' but it's not active ({droplet.status}). Creating new one...")
341
+
342
+ droplet = digitalocean.Droplet(
343
+ token=self.token,
344
+ name=name,
345
+ region=region,
346
+ image=image,
347
+ size_slug=size,
348
+ ssh_keys=[ssh_key.id],
349
+ user_data=cloud_init_script,
350
+ tags=["xenfra"],
351
+ )
352
+ droplet.create()
353
+ logger(
354
+ f" - Droplet '{name}' creation initiated (ID: {droplet.id}). Waiting for it to become active..."
355
+ )
293
356
 
294
357
  # === 5. POLLING STAGE ===
295
358
  logger("\n[bold blue]PHASE 5: WAITING FOR SERVER SETUP[/bold blue]")
@@ -324,22 +387,36 @@ class InfraEngine:
324
387
  if not conn or not conn.is_connected:
325
388
  raise DeploymentError("Could not establish SSH connection.", stage="Polling")
326
389
 
390
+ logger(" - [DEBUG] Entering SSH context for Phase 5 polling...")
327
391
  with conn:
328
392
  last_log_line = 0
329
- for i in range(120): # 20-minute timeout for cloud-init (installing Docker, Caddy, etc.)
330
- # Check for completion
331
- if conn.run("test -f /root/setup_complete", warn=True, hide=True).ok:
332
- logger(" - Cloud-init setup complete.")
333
- break
393
+ logger(" - Polling server setup log (/root/setup.log)...")
394
+ for i in range(120): # 20-minute timeout
395
+ # Heartbeat
396
+ if i % 3 == 0: # Every 30 seconds
397
+ logger(f" - Phase 5 Heartbeat: Waiting for setup completion ({i+1}/120)...")
398
+
399
+ # Check for completion with timeout
400
+ try:
401
+ check_result = conn.run("test -f /root/setup_complete", warn=True, hide=True, timeout=10)
402
+ if check_result.ok:
403
+ logger(" - Cloud-init setup complete.")
404
+ break
405
+ except Exception as e:
406
+ logger(f" - [Warning] Status check failed: {e}. Retrying...")
334
407
 
335
408
  # Tail the setup log for visibility
336
- log_result = conn.run(f"tail -n +{last_log_line + 1} /root/setup.log 2>/dev/null", warn=True, hide=True)
337
- if log_result.ok and log_result.stdout.strip():
338
- new_lines = log_result.stdout.strip().split("\n")
339
- for line in new_lines:
340
- if line.strip():
341
- logger(f" [Server Setup] {line.strip()}")
342
- last_log_line += len(new_lines)
409
+ try:
410
+ log_result = conn.run(f"tail -n +{last_log_line + 1} /root/setup.log 2>/dev/null", warn=True, hide=True, timeout=10)
411
+ if log_result.ok and log_result.stdout.strip():
412
+ new_lines = log_result.stdout.strip().split("\n")
413
+ for line in new_lines:
414
+ if line.strip():
415
+ logger(f" [Server Setup] {line.strip()}")
416
+ last_log_line += len(new_lines)
417
+ except Exception as e:
418
+ # Log doesn't exist yet or tail failed
419
+ pass
343
420
 
344
421
  time.sleep(10)
345
422
  else:
@@ -350,32 +427,113 @@ class InfraEngine:
350
427
  # === 6. CODE UPLOAD STAGE ===
351
428
  logger("\n[bold blue]PHASE 6: UPLOADING APPLICATION CODE[/bold blue]")
352
429
  with self._get_connection(ip_address) as conn:
353
- # If repo_url is provided, clone it instead of uploading local code
430
+ # Option 1: Git clone (if repo_url provided)
354
431
  if repo_url:
355
- logger(f" - Cloning repository from {repo_url}...")
356
- conn.run(f"git clone {repo_url} /root/app")
357
- else:
358
- # Safety check: Prevent accidental deployment of Xenfra service code
359
- if os.getenv("XENFRA_SERVICE_MODE") == "true":
360
- raise DeploymentError(
361
- "Local folder deployment is not yet supported via the cloud API. "
362
- "Please provide a git_repo URL in your xenfra.yaml or CLI command.",
363
- stage="Code Upload",
432
+ logger(f" - Cloning repository from {repo_url} (branch: {branch})...")
433
+ # Use --branch to checkout specific branch, --single-branch for efficiency
434
+ clone_cmd = f"git clone --branch {branch} --single-branch {repo_url} /root/app"
435
+ result = conn.run(clone_cmd, warn=True, hide=True)
436
+ if result.failed:
437
+ # Try without --single-branch in case branch doesn't exist
438
+ # Clean up any partial clone first
439
+ logger(f" - Branch '{branch}' clone failed, trying default branch...")
440
+ conn.run("rm -rf /root/app", warn=True, hide=True)
441
+ conn.run(f"git clone {repo_url} /root/app")
442
+
443
+ # Option 2: Delta upload (if file_manifest provided)
444
+ elif file_manifest and get_file_content:
445
+ logger(f" - Syncing {len(file_manifest)} files via delta upload...")
446
+
447
+ # Ensure /root/app exists
448
+ conn.run("mkdir -p /root/app", hide=True)
449
+
450
+ for i, file_info in enumerate(file_manifest):
451
+ path = file_info['path']
452
+ sha = file_info['sha']
453
+ size = file_info.get('size', 0)
454
+
455
+ # Build Safety: Placeholder for 0-byte critical files
456
+ # (Hatchling/Pip fail if README.md or __init__.py are mentioned but empty)
457
+ is_critical_empty = (
458
+ size == 0 and
459
+ (path.lower() == 'readme.md' or path.endswith('__init__.py'))
364
460
  )
461
+
462
+ # Smart Incremental Sync: Check if file exists and has same SHA
463
+ remote_path = f"/root/app/{path}"
464
+ check_sha_cmd = f"sha256sum {remote_path}"
465
+ result = conn.run(check_sha_cmd, warn=True, hide=True)
466
+
467
+ if result.ok:
468
+ remote_sha = result.stdout.split()[0]
469
+ if remote_sha == sha and not is_critical_empty:
470
+ # File already exists and matches, skip upload
471
+ continue
472
+
473
+ # Get file content from storage
474
+ content = get_file_content(sha)
475
+ if content is None:
476
+ raise DeploymentError(f"File not found in storage: {path} (sha: {sha})", stage="Code Upload")
477
+
478
+ # Apply placeholder if critical and empty
479
+ if is_critical_empty:
480
+ content = b"# xenfra placeholder\n"
481
+ logger(f" - [Zen Mode] Injected placeholder into empty {path}")
482
+
483
+ # Create directory if needed
484
+ dir_path = os.path.dirname(path)
485
+ if dir_path:
486
+ conn.run(f"mkdir -p /root/app/{dir_path}", warn=True, hide=True)
487
+
488
+ # Use SFTP for file transfer (handles large files)
489
+ from io import BytesIO
490
+ conn.put(BytesIO(content), remote_path)
491
+
492
+ # Progress update every 10 files
493
+ if (i + 1) % 10 == 0 or i == len(file_manifest) - 1:
494
+ logger(f" - Synced {i + 1}/{len(file_manifest)} files...")
365
495
 
366
- fabric.transfer.Transfer(conn).upload(
367
- ".", "/root/app", exclude=[".git", ".venv", "__pycache__"]
368
- )
496
+ logger(f" - All {len(file_manifest)} files synced.")
497
+
498
+ # Option 3: Local rsync (only works locally, not in service mode)
499
+ else:
500
+ # Note: Early validation in Phase 0 should have caught this for service mode
501
+ private_key_path = str(Path.home() / ".ssh" / "id_rsa")
502
+ rsync_cmd = f'rsync -avz --exclude=".git" --exclude=".venv" --exclude="__pycache__" -e "ssh -i {private_key_path} -o StrictHostKeyChecking=no" . root@{ip_address}:/root/app/'
503
+ logger(f" - Uploading local code via rsync...")
504
+ result = subprocess.run(rsync_cmd, shell=True, capture_output=True, text=True)
505
+ if result.returncode != 0:
506
+ raise DeploymentError(f"rsync failed: {result.stderr}", stage="Code Upload")
369
507
  logger(" - Code upload complete.")
370
508
 
509
+
510
+ # === 6.5. WRITE DEPLOYMENT ASSETS TO DROPLET ===
511
+ logger("\n[bold blue]PHASE 6.5: WRITING DEPLOYMENT ASSETS[/bold blue]")
512
+ with self._get_connection(ip_address) as conn:
513
+ for filename, content in rendered_assets.items():
514
+ # Use heredoc with unique delimiter to write file content
515
+ # Single-quoted delimiter prevents shell variable expansion
516
+ logger(f"   - Writing {filename}...")
517
+ try:
518
+ # Use base64 encoding to safely transfer file content
519
+ # Use printf to avoid issues with special characters
520
+ import base64
521
+ encoded_content = base64.b64encode(content.encode()).decode()
522
+ # Use printf with %s to handle any special characters in base64
523
+ conn.run(f"printf '%s' '{encoded_content}' | base64 -d > /root/app/{filename}")
524
+ except Exception as e:
525
+ raise DeploymentError(f"Failed to write {filename}: {e}", stage="Asset Write")
526
+ logger(" - Deployment assets written.")
527
+
371
528
  # === 7. FINAL DEPLOY STAGE ===
372
529
  if is_dockerized:
373
530
  logger("\n[bold blue]PHASE 7: BUILDING AND DEPLOYING CONTAINERS[/bold blue]")
374
531
  with self._get_connection(ip_address) as conn:
375
- result = conn.run("cd /root/app && docker-compose up -d --build", hide=True)
532
+ # Force --no-cache to ensure updated files (like README.md placeholders) are used
533
+ result = conn.run("cd /root/app && docker compose build --no-cache && docker compose up -d", hide=True)
376
534
  if result.failed:
377
535
  raise DeploymentError(f"docker-compose failed: {result.stderr}", stage="Deploy")
378
- logger(" - Docker containers are building in the background...")
536
+ logger(" - Docker build complete, containers starting...")
379
537
  else:
380
538
  logger("\n[bold blue]PHASE 7: STARTING HOST-BASED APPLICATION[/bold blue]")
381
539
  start_command = context.get("command", f"uvicorn main:app --port {context.get('port', 8000)}")
@@ -387,14 +545,23 @@ class InfraEngine:
387
545
 
388
546
  # === 8. VERIFICATION STAGE ===
389
547
  logger("\n[bold blue]PHASE 8: VERIFYING DEPLOYMENT[/bold blue]")
548
+
549
+ # Give container a moment to initialize before first health check
550
+ time.sleep(5)
551
+
390
552
  app_port = context.get("port", 8000)
391
553
  for i in range(24): # 2-minute timeout for health checks
392
554
  logger(f" - Health check attempt {i + 1}/24...")
393
555
  with self._get_connection(ip_address) as conn:
394
556
  # Check if running
395
557
  if is_dockerized:
396
- ps_result = conn.run("cd /root/app && docker-compose ps", hide=True)
397
- running = "running" in ps_result.stdout
558
+ ps_result = conn.run("cd /root/app && docker compose ps", hide=True)
559
+ ps_output = ps_result.stdout.lower()
560
+ # Docker Compose V1 shows "running", V2 shows "Up" in status
561
+ running = "running" in ps_output or " up " in ps_output
562
+ if "restarting" in ps_output:
563
+ logs = conn.run("cd /root/app && docker compose logs --tail 20", hide=True).stdout
564
+ raise DeploymentError(f"Application is crash-looping (restarting). Logs:\n{logs}", stage="Verification")
398
565
  else:
399
566
  ps_result = conn.run("ps aux | grep -v grep | grep python", hide=True)
400
567
  running = ps_result.ok and len(ps_result.stdout.strip()) > 0
@@ -403,11 +570,19 @@ class InfraEngine:
403
570
  time.sleep(5)
404
571
  continue
405
572
 
406
- # Check if application is responsive
407
- curl_result = conn.run(
408
- f"curl -s --fail http://localhost:{app_port}/", warn=True
573
+ # Check if application is responsive (port is listening)
574
+ # Accept ANY HTTP response (including 404) - it means the app is running
575
+ # Use curl with -w to get HTTP code, accept any response >= 100
576
+ port_check = conn.run(
577
+ f"curl -s -o /dev/null -w '%{{http_code}}' --connect-timeout 3 http://localhost:{app_port}/",
578
+ warn=True, hide=True
409
579
  )
410
- if curl_result.ok:
580
+ # curl may exit non-zero for 404, but still outputs HTTP code
581
+ http_code = port_check.stdout.strip()
582
+
583
+ # Any HTTP response (200, 404, 500, etc.) means app is running
584
+ if http_code.isdigit() and int(http_code) >= 100:
585
+
411
586
  logger(
412
587
  "[bold green] - Health check passed! Application is live.[/bold green]"
413
588
  )
@@ -430,13 +605,10 @@ class InfraEngine:
430
605
  return droplet # Return the full droplet object
431
606
  time.sleep(5)
432
607
  else:
433
- # On failure, get logs and destroy droplet
608
+ # Capture logs on timeout failure
434
609
  with self._get_connection(ip_address) as conn:
435
- logs = conn.run("cd /root/app && docker-compose logs", hide=True).stdout
436
- raise DeploymentError(
437
- f"Application failed to become healthy in time. Logs:\n{logs}",
438
- stage="Verification",
439
- )
610
+ logs = conn.run("cd /root/app && docker compose logs --tail 50", hide=True, warn=True).stdout if is_dockerized else ""
611
+ raise DeploymentError(f"Application failed to become healthy in time. Logs:\n{logs}", stage="Verification")
440
612
 
441
613
  except Exception as e:
442
614
  if droplet:
@@ -158,6 +158,7 @@ class CodebaseAnalysisResponse(BaseModel):
158
158
  """
159
159
 
160
160
  framework: str = Field(..., description="Detected framework (fastapi, flask, django)")
161
+ entrypoint: str | None = Field(None, description="Application entrypoint (e.g., 'todo.main:app')")
161
162
  port: int = Field(..., description="Detected application port")
162
163
  database: str = Field(..., description="Detected database (postgresql, mysql, sqlite, none)")
163
164
  cache: str | None = Field(None, description="Detected cache (redis, memcached, none)")
@@ -97,7 +97,7 @@ class DeploymentsManager(BaseManager):
97
97
  except Exception as e:
98
98
  raise XenfraError(f"Failed to get logs for deployment {deployment_id}: {e}")
99
99
 
100
- def create_stream(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True) -> Iterator[dict]:
100
+ def create_stream(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True, port: int = None, command: str = None, entrypoint: str = None, database: str = None, package_manager: str = None, dependency_file: str = None, file_manifest: list = None) -> Iterator[dict]:
101
101
  """
102
102
  Creates a new deployment with real-time SSE log streaming.
103
103
 
@@ -105,11 +105,19 @@ class DeploymentsManager(BaseManager):
105
105
 
106
106
  Args:
107
107
  project_name: Name of the project
108
- git_repo: Git repository URL
108
+ git_repo: Git repository URL (optional if file_manifest provided)
109
109
  branch: Git branch to deploy
110
110
  framework: Framework type (fastapi, flask, django)
111
111
  region: DigitalOcean region (optional)
112
112
  size_slug: DigitalOcean droplet size (optional)
113
+ is_dockerized: Whether to use Docker (optional)
114
+ port: Application port (optional, default 8000)
115
+ command: Start command (optional, auto-detected if not provided)
116
+ entrypoint: Application entrypoint (optional, e.g. 'todo.main:app')
117
+ database: Database type (optional, e.g. 'postgres')
118
+ package_manager: Package manager (optional, e.g. 'pip', 'uv')
119
+ dependency_file: Dependency file (optional, e.g. 'requirements.txt')
120
+ file_manifest: List of files for delta upload [{path, sha, size}, ...]
113
121
 
114
122
  Yields:
115
123
  dict: SSE events with 'event' and 'data' fields
@@ -133,6 +141,21 @@ class DeploymentsManager(BaseManager):
133
141
  payload["size_slug"] = size_slug
134
142
  if is_dockerized is not None:
135
143
  payload["is_dockerized"] = is_dockerized
144
+ if port:
145
+ payload["port"] = port
146
+ if command:
147
+ payload["command"] = command
148
+ if entrypoint:
149
+ payload["entrypoint"] = entrypoint
150
+ if database:
151
+ payload["database"] = database
152
+ if package_manager:
153
+ payload["package_manager"] = package_manager
154
+ if dependency_file:
155
+ payload["dependency_file"] = dependency_file
156
+ if file_manifest:
157
+ payload["file_manifest"] = file_manifest
158
+
136
159
 
137
160
  try:
138
161
  # Use httpx to stream the SSE response
@@ -150,11 +173,8 @@ class DeploymentsManager(BaseManager):
150
173
  streaming_api_url = os.getenv("XENFRA_STREAMING_API_URL")
151
174
  if streaming_api_url:
152
175
  base_url = streaming_api_url
153
- elif self._client.api_url == "https://api.xenfra.tech":
154
- # Production: use non-proxied streaming subdomain
155
- base_url = "https://stream.xenfra.tech"
156
176
  else:
157
- # Local/dev: use regular API URL
177
+ # Local/dev/production: use regular API URL
158
178
  base_url = self._client.api_url
159
179
 
160
180
  url = f"{base_url}/deployments/stream"
@@ -0,0 +1,101 @@
1
+ """
2
+ Files resource manager for delta uploads.
3
+
4
+ Provides methods to check file cache status and upload files to the server.
5
+ """
6
+
7
+ from typing import Dict, List
8
+
9
+
10
+ class FilesManager:
11
+ """Manager for file upload operations."""
12
+
13
+ def __init__(self, client):
14
+ """
15
+ Initialize the FilesManager.
16
+
17
+ Args:
18
+ client: The XenfraClient instance.
19
+ """
20
+ self._client = client
21
+
22
+ def check(self, files: List[Dict]) -> Dict:
23
+ """
24
+ Check which files are missing from server cache.
25
+
26
+ Args:
27
+ files: List of file info dicts with keys: path, sha, size
28
+
29
+ Returns:
30
+ Dict with keys:
31
+ - missing: List of SHA hashes that need to be uploaded
32
+ - cached: Number of files already cached on server
33
+ """
34
+ payload = {
35
+ "files": [
36
+ {"path": f["path"], "sha": f["sha"], "size": f["size"]}
37
+ for f in files
38
+ ]
39
+ }
40
+
41
+ response = self._client._request("POST", "/files/check", json=payload)
42
+ return response.json()
43
+
44
+ def upload(self, content: bytes, sha: str, path: str) -> Dict:
45
+ """
46
+ Upload a single file to the server.
47
+
48
+ Args:
49
+ content: Raw file content as bytes
50
+ sha: SHA256 hash of the content
51
+ path: Relative file path
52
+
53
+ Returns:
54
+ Dict with keys: sha, size, stored
55
+ """
56
+ import httpx
57
+
58
+ headers = {
59
+ "Authorization": f"Bearer {self._client._token}",
60
+ "Content-Type": "application/octet-stream",
61
+ "X-Xenfra-Sha": sha,
62
+ "X-Xenfra-Path": path,
63
+ }
64
+
65
+ response = httpx.post(
66
+ f"{self._client.api_url}/files/upload",
67
+ content=content,
68
+ headers=headers,
69
+ timeout=120.0, # 2 minutes for large files
70
+ )
71
+ response.raise_for_status()
72
+ return response.json()
73
+
74
+ def upload_files(self, files: List[Dict], missing_shas: List[str], progress_callback=None) -> int:
75
+ """
76
+ Upload multiple files that are missing from the server.
77
+
78
+ Args:
79
+ files: List of file info dicts with keys: path, sha, size, abs_path
80
+ missing_shas: List of SHA hashes that need to be uploaded
81
+ progress_callback: Optional callback(uploaded_count, total_count)
82
+
83
+ Returns:
84
+ Number of files uploaded
85
+ """
86
+ missing_set = set(missing_shas)
87
+ files_to_upload = [f for f in files if f["sha"] in missing_set]
88
+ total = len(files_to_upload)
89
+ uploaded = 0
90
+
91
+ for file_info in files_to_upload:
92
+ with open(file_info["abs_path"], "rb") as f:
93
+ content = f.read()
94
+
95
+ self.upload(content, file_info["sha"], file_info["path"])
96
+ uploaded += 1
97
+
98
+ if progress_callback:
99
+ progress_callback(uploaded, total)
100
+
101
+ return uploaded
@@ -0,0 +1,38 @@
1
+ # Dockerfile template for Python web applications
2
+ FROM {{ python_version | default('python:3.11-slim') }}
3
+
4
+ WORKDIR /app
5
+
6
+ {% if package_manager != 'pip' %}
7
+ # Install uv package manager and add to PATH
8
+ RUN apt-get update && apt-get install -y curl && \
9
+ curl -LsSf https://astral.sh/uv/install.sh | sh && \
10
+ apt-get remove -y curl && \
11
+ apt-get clean && \
12
+ rm -rf /var/lib/apt/lists/*
13
+ ENV PATH="/root/.local/bin:/root/.cargo/bin:$PATH"
14
+ {% endif %}
15
+
16
+ {% if dependency_file == 'pyproject.toml' %}
17
+ # For pyproject.toml, copy all files first (hatchling needs README.md etc.)
18
+ COPY . .
19
+
20
+ # Install dependencies
21
+ RUN uv pip install --system --no-cache .
22
+ {% else %}
23
+ COPY {{ dependency_file | default('requirements.txt') }} .
24
+
25
+ # Install dependencies
26
+ {% if package_manager == 'pip' %}
27
+ RUN pip install --no-cache-dir -r {{ dependency_file | default('requirements.txt') }}
28
+ {% else %}
29
+ RUN uv pip install --system --no-cache -r {{ dependency_file | default('requirements.txt') }}
30
+ {% endif %}
31
+
32
+ COPY . .
33
+ {% endif %}
34
+
35
+ # Expose the application port
36
+ EXPOSE {{ port | default(8000) }}
37
+
38
+ # The command to run the application will be in docker-compose.yml
@@ -1,22 +1,21 @@
1
1
  # docker-compose.yml template
2
- version: '3.8'
3
2
 
4
3
  services:
5
4
  app:
6
5
  build: .
7
6
  ports:
8
7
  - "{{ port | default(8000) }}:{{ port | default(8000) }}"
9
- volumes:
10
- - .:/app
8
+ {% if command and command != 'None' %}
11
9
  command: {{ command }}
12
- {% if database == 'postgres' %}
10
+ {% else %}
11
+ command: ["uvicorn", "{{ entrypoint | default('src.main:app') }}", "--host", "0.0.0.0", "--port", "{{ port | default(8000) }}"]
12
+ {% endif %}
13
+ {% if database == 'postgres' or database == 'postgresql' %}
13
14
  depends_on:
14
15
  - db
15
16
  environment:
16
17
  - DATABASE_URL=postgresql://{{ db_user | default('user') }}:{{ db_password | default('password') }}@db:5432/{{ db_name | default('appdb') }}
17
- {% endif %}
18
18
 
19
- {% if database == 'postgres' %}
20
19
  db:
21
20
  image: postgres:15-alpine
22
21
  volumes:
@@ -25,9 +24,7 @@ services:
25
24
  - POSTGRES_USER={{ db_user | default('user') }}
26
25
  - POSTGRES_PASSWORD={{ db_password | default('password') }}
27
26
  - POSTGRES_DB={{ db_name | default('appdb') }}
28
- {% endif %}
29
27
 
30
28
  volumes:
31
- {% if database == 'postgres' %}
32
29
  postgres_data:
33
- {% endif %}
30
+ {% endif %}
@@ -1,104 +0,0 @@
1
- import os
2
- from pathlib import Path
3
-
4
- from jinja2 import Environment, FileSystemLoader
5
-
6
-
7
- def detect_framework(path="."):
8
- """
9
- Scans common Python project structures to guess the framework and entrypoint.
10
- Returns: (framework_name, default_port, start_command) or (None, None, None)
11
- """
12
- project_root = Path(path).resolve()
13
-
14
- # Check for Django first (common pattern: manage.py in root)
15
- if (project_root / "manage.py").is_file():
16
- # Assume the project name is the current directory name
17
- project_name = project_root.name
18
- return "django", 8000, f"gunicorn {project_name}.wsgi:application --bind 0.0.0.0:8000"
19
-
20
- candidate_files = []
21
-
22
- # Check directly in project root
23
- for name in ["main.py", "app.py"]:
24
- if (project_root / name).is_file():
25
- candidate_files.append(project_root / name)
26
-
27
- # Check in src/*/ (standard package layout)
28
- for src_dir in project_root.glob("src/*"):
29
- if src_dir.is_dir():
30
- for name in ["main.py", "app.py"]:
31
- if (src_dir / name).is_file():
32
- candidate_files.append(src_dir / name)
33
-
34
- for file_path in candidate_files:
35
- with open(file_path, "r") as f:
36
- content = f.read()
37
-
38
- try:
39
- module_name = str(file_path.relative_to(project_root)).replace(os.sep, '.')[:-3]
40
- # If path is like src/testdeploy/main.py, module_name becomes src.testdeploy.main
41
- if module_name.startswith("src."):
42
- # Strip the "src." prefix for gunicorn/uvicorn
43
- module_name = module_name[4:]
44
- except ValueError:
45
- module_name = file_path.stem
46
-
47
- if "FastAPI" in content:
48
- # Use standard :app convention
49
- return "fastapi", 8000, f"uvicorn {module_name}:app --host 0.0.0.0 --port 8000"
50
-
51
- if "Flask" in content:
52
- return "flask", 5000, f"gunicorn {module_name}:app -b 0.0.0.0:5000"
53
-
54
- return None, None, None
55
-
56
-
57
- def generate_templated_assets(context: dict):
58
- """
59
- Generates deployment assets (Dockerfile, docker-compose.yml) using Jinja2 templates.
60
-
61
- Args:
62
- context: A dictionary containing information for rendering templates,
63
- e.g., {'database': 'postgres', 'python_version': 'python:3.11-slim'}
64
- """
65
- # Path to the templates directory
66
- template_dir = Path(__file__).parent / "templates"
67
- env = Environment(loader=FileSystemLoader(template_dir))
68
-
69
- # Detect framework specifics (use context if available, otherwise fallback to manual)
70
- detected_framework, detected_port, detected_command = detect_framework()
71
-
72
- framework = context.get("framework") or detected_framework
73
- port = context.get("port") or detected_port
74
- command = context.get("command") or detected_command
75
-
76
- if not framework:
77
- print("Warning: No recognizable web framework detected and no framework provided in context.")
78
- return []
79
-
80
- # Merge detected values with provided context (context takes precedence)
81
- render_context = {
82
- "framework": framework,
83
- "port": port,
84
- "command": command,
85
- **context
86
- }
87
-
88
- generated_files = []
89
-
90
- # --- 1. Dockerfile ---
91
- dockerfile_template = env.get_template("Dockerfile.j2")
92
- dockerfile_content = dockerfile_template.render(render_context)
93
- with open("Dockerfile", "w") as f:
94
- f.write(dockerfile_content)
95
- generated_files.append("Dockerfile")
96
-
97
- # --- 2. docker-compose.yml ---
98
- compose_template = env.get_template("docker-compose.yml.j2")
99
- compose_content = compose_template.render(render_context)
100
- with open("docker-compose.yml", "w") as f:
101
- f.write(compose_content)
102
- generated_files.append("docker-compose.yml")
103
-
104
- return generated_files
@@ -1,25 +0,0 @@
1
- # Dockerfile template for Python web applications
2
- FROM {{ python_version | default('python:3.11-slim') }}
3
-
4
- WORKDIR /app
5
-
6
- # Install uv, our preferred package manager
7
- RUN apt-get update && apt-get install -y curl && \
8
- curl -LsSf https://astral.sh/uv/install.sh | sh && \
9
- apt-get remove -y curl && \
10
- apt-get clean && \
11
- rm -rf /var/lib/apt/lists/*
12
-
13
- COPY requirements.txt .
14
-
15
- # Install dependencies
16
- RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt
17
-
18
- COPY . .
19
-
20
- # Expose the application port
21
- EXPOSE {{ port | default(8000) }}
22
-
23
- # The command to run the application will be in docker-compose.yml
24
- # This allows for more flexibility
25
-
File without changes