xenfra-0.1.0-py3-none-any.whl
- xenfra/__init__.py +1 -0
- xenfra/cli.py +162 -0
- xenfra/dockerizer.py +129 -0
- xenfra/engine.py +264 -0
- xenfra/recipes.py +115 -0
- xenfra/utils.py +69 -0
- xenfra-0.1.0.dist-info/METADATA +128 -0
- xenfra-0.1.0.dist-info/RECORD +10 -0
- xenfra-0.1.0.dist-info/WHEEL +4 -0
- xenfra-0.1.0.dist-info/entry_points.txt +3 -0
xenfra/__init__.py
ADDED
@@ -0,0 +1 @@
+# This file makes src/xenfra a Python package.
xenfra/cli.py
ADDED
@@ -0,0 +1,162 @@
+from rich.console import Console
+from rich.table import Table
+from rich.prompt import Prompt, Confirm, IntPrompt
+from rich.panel import Panel
+
+# Import Custom Logic
+from xenfra.engine import InfraEngine
+
+console = Console()
+
+# --- CONSTANTS ---
+SIZES = {
+    "1": ("s-1vcpu-1gb", "$6/mo", "1GB RAM / 1 CPU"),
+    "2": ("s-1vcpu-2gb", "$12/mo", "2GB RAM / 1 CPU"),
+    "3": ("s-2vcpu-4gb", "$24/mo", "4GB RAM / 2 CPU"),
+}
+
+IMAGES = {
+    "1": ("ubuntu-24-04-x64", "Ubuntu 24.04 LTS"),
+    "2": ("debian-12-x64", "Debian 12"),
+    "3": ("fedora-39-x64", "Fedora 39"),
+}
+
+def print_header(email):
+    console.clear()
+    grid = Table.grid(expand=True)
+    grid.add_column(justify="center", ratio=1)
+    grid.add_row("[bold cyan]🧘 XENFRA[/bold cyan] [dim]v0.1.0[/dim]")
+    grid.add_row("[italic]Infrastructure in Zen Mode[/italic]")
+    console.print(Panel(
+        grid,
+        style="cyan",
+        subtitle=f"👤 {email} | 📧 support@xenfracloud.com"
+    ))
+
+def deploy_menu(engine: InfraEngine):
+    console.print("\n[bold green]🛠️ CONFIGURE YOUR SERVER[/bold green]")
+
+    # 1. Name
+    name = Prompt.ask(" 🏷️ Server Name", default="xenfra-node-01")
+
+    # 2. Image Selection
+    console.print("\n [bold]Select Operating System:[/bold]")
+    for k, v in IMAGES.items():
+        console.print(f" [cyan]{k}[/cyan]. {v[1]}")
+    img_choice = Prompt.ask(" Choice", choices=list(IMAGES.keys()), default="1")
+    selected_image = IMAGES[img_choice][0]
+
+    # 3. Size Selection
+    console.print("\n [bold]Select Power Size:[/bold]")
+    for k, v in SIZES.items():
+        console.print(f" [cyan]{k}[/cyan]. {v[2]} ({v[1]})")
+    size_choice = Prompt.ask(" Choice", choices=list(SIZES.keys()), default="1")
+    selected_size = SIZES[size_choice][0]
+
+    # 4. Final Confirm
+    if not Confirm.ask(f"\n🚀 Deploy [cyan]{name}[/cyan]?"):
+        return
+
+    # 5. EXECUTE (Delegate to Engine)
+    try:
+        # This is now a single, blocking call to the stateful engine.
+        # The engine handles all the complexity. The CLI just shows the final result.
+        result = engine.deploy_server(
+            name=name,
+            region="blr1",  # region can be prompted for in the future
+            size=selected_size,
+            image=selected_image,
+            logger=console.log
+        )
+        console.print(Panel(
+            f"[bold green]✅ SERVER ONLINE & DEPLOYED![/bold green]\n\n"
+            f"🌐 IP Address: [bold cyan]{result['ip']}[/bold cyan]\n"
+            f"📂 App Path: [yellow]/root/app[/yellow]\n"
+            f"🔑 Login: ssh root@{result['ip']}",
+            title="Deployment Success",
+            border_style="green"
+        ))
+    except Exception as e:
+        console.print(Panel(
+            f"[bold red]⚠️ DEPLOYMENT FAILED[/bold red]\n\n"
+            f"The engine reported a critical error: {e}\n"
+            f"Check logs if available on the server.",
+            title="Fatal Error",
+            border_style="red"
+        ))
+
+    Prompt.ask("\nPress Enter to return to menu")
+
+def list_menu(engine: InfraEngine):
+    console.print("\n[bold]📡 SCANNING FLEET...[/bold]")
+    with console.status("Calling DigitalOcean API..."):
+        servers = engine.list_servers()
+
+    if not servers:
+        console.print("[yellow] No active servers found.[/yellow]")
+    else:
+        table = Table(show_header=True, header_style="bold magenta")
+        table.add_column("ID", style="dim", width=12)
+        table.add_column("Name", style="cyan")
+        table.add_column("IP Address", style="green")
+        table.add_column("Status")
+        table.add_column("Cost", justify="right")
+        for s in servers:
+            table.add_row(str(s.id), s.name, s.ip_address, s.status, f"${s.size['price_monthly']}/mo")
+        console.print(table)
+    Prompt.ask("\nPress Enter to return to menu")
+
+def destroy_menu(engine: InfraEngine):
+    console.print("\n[bold red]🧨 DESTROY SERVER[/bold red]")
+    with console.status("Calling DigitalOcean API..."):
+        servers = engine.list_servers()
+
+    if not servers:
+        console.print("[yellow] No servers to destroy.[/yellow]")
+        Prompt.ask("\nPress Enter to return")
+        return
+
+    for i, s in enumerate(servers):
+        console.print(f" [{i+1}] {s.name} ({s.ip_address})")
+
+    choice = IntPrompt.ask("\n Select Server to Destroy (0 to cancel)", choices=[str(i) for i in range(len(servers) + 1)], show_choices=False)
+    if choice == 0:
+        return
+
+    target = servers[choice-1]
+
+    if Confirm.ask(f" Are you SURE you want to delete [red]{target.name}[/red]?"):
+        with console.status(f"🔥 Destroying {target.name}..."):
+            engine.destroy_server(target.id)
+        console.print(f"[green] Server {target.name} destroyed.[/green]")
+
+    Prompt.ask("\nPress Enter to return")
+
+
+def main():
+    try:
+        engine = InfraEngine()
+        user = engine.get_user_info()
+        while True:
+            print_header(user['email'])
+            console.print("\n[bold]MAIN MENU:[/bold]")
+            console.print(" [1] 🚀 Deploy New Server")
+            console.print(" [2] 📋 List Active Servers")
+            console.print(" [3] 🧨 Destroy a Server")
+            console.print(" [4] 🚪 Exit")
+            choice = Prompt.ask("\n 👉 Select Option", choices=["1", "2", "3", "4"], default="2")
+
+            if choice == "1":
+                deploy_menu(engine)
+            elif choice == "2":
+                list_menu(engine)
+            elif choice == "3":
+                destroy_menu(engine)
+            elif choice == "4":
+                console.print("[cyan]Goodbye, CEO.[/cyan]")
+                break
+    except Exception as e:
+        console.print(f"[bold red]CRITICAL ERROR:[/bold red] {e}")
+
+if __name__ == "__main__":
+    main()
xenfra/dockerizer.py
ADDED
@@ -0,0 +1,129 @@
+import os
+
+def detect_framework():
+    """
+    Scans files to guess the framework and entrypoint.
+    Returns: (framework_name, default_port, start_command) or (None, None, None)
+    """
+    # 1. FastAPI Scan
+    if os.path.exists("main.py"):
+        with open("main.py", "r") as f:
+            content = f.read()
+            if "FastAPI" in content:
+                return "fastapi", 8000, "uvicorn main:app --host 0.0.0.0 --port 8000"
+
+    # 2. Flask Scan
+    if os.path.exists("app.py"):
+        with open("app.py", "r") as f:
+            content = f.read()
+            if "Flask" in content:
+                return "flask", 5000, "gunicorn app:app -b 0.0.0.0:5000"
+
+    # No framework detected
+    return None, None, None
+
+def generate_deployment_assets(context):
+    """
+    Creates Dockerfile, docker-compose.yml, and Caddyfile
+    if a web framework is detected.
+    """
+    framework, port, cmd = detect_framework()
+    if not framework:
+        return []
+
+    generated = []
+    print(f" ✨ Web framework detected: {framework.upper()}. Generating deployment assets...")
+
+    # --- 1. DOCKERFILE ---
+    if not os.path.exists("Dockerfile"):
+        # Use UV if detected, otherwise Pip
+        install_cmd = "RUN pip install --no-cache-dir -r requirements.txt"
+        if context.get("type") == "uv":
+            # Assuming uv.lock or pyproject.toml is copied
+            install_cmd = "RUN pip install uv && uv pip install --system -r requirements.txt"
+
+        dockerfile_content = f"""# Generated by Xenfra
+FROM python:3.11-slim
+
+WORKDIR /app
+
+# Install System Deps
+RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
+
+# Copy all dependency definition files
+COPY requirements.txt* .
+COPY pyproject.toml* .
+COPY uv.lock* .
+
+# Install Python Deps
+{install_cmd}
+
+# Copy Code
+COPY . .
+
+# Expose Port
+EXPOSE {port}
+
+# Start Command
+CMD ["/bin/sh", "-c", "{cmd}"]
+"""
+        with open("Dockerfile", "w") as f:
+            f.write(dockerfile_content)
+        generated.append("Dockerfile")
+
+    # --- 2. CADDYFILE ---
+    if not os.path.exists("Caddyfile"):
+        caddy_content = f"""{{
+    # Enable automatic HTTPS for all sites
+    email your_email@example.com
+}}
+
+:443 {{
+    reverse_proxy app:{port}
+}}
+
+:80 {{
+    redir https://{{host}}{{uri}}
+}}
+"""
+        with open("Caddyfile", "w") as f:
+            f.write(caddy_content)
+        generated.append("Caddyfile")
+
+    # --- 3. DOCKER COMPOSE ---
+    if not os.path.exists("docker-compose.yml"):
+        compose_content = f"""
+version: '3.8'
+
+services:
+  app:
+    build: .
+    container_name: xenfra_app
+    restart: always
+    # The internal port is exposed to other services in this network
+    expose:
+      - "{port}"
+
+  caddy:
+    image: caddy:latest
+    container_name: xenfra_caddy
+    restart: always
+    ports:
+      - "80:80"
+      - "443:443"
+    volumes:
+      - ./Caddyfile:/etc/caddy/Caddyfile
+      - caddy_data:/data
+      - caddy_config:/config
+    depends_on:
+      - app
+
+volumes:
+  caddy_data:
+  caddy_config:
+"""
+        with open("docker-compose.yml", "w") as f:
+            f.write(compose_content)
+        generated.append("docker-compose.yml")
+
+    return generated
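For orientation, a rough usage sketch of the two functions above (not part of the package; the temp directory and sample `app.py` are illustrative):

```python
import os
import tempfile

from xenfra.dockerizer import detect_framework, generate_deployment_assets

old_cwd = os.getcwd()
workdir = tempfile.mkdtemp()
os.chdir(workdir)  # both functions scan the current working directory
try:
    # A minimal Flask entrypoint is enough to trigger detection.
    with open("app.py", "w") as f:
        f.write("from flask import Flask\napp = Flask(__name__)\n")

    print(detect_framework())
    # ('flask', 5000, 'gunicorn app:app -b 0.0.0.0:5000')

    # The real flow passes get_project_context(); an empty dict simply
    # falls back to the plain pip install command in the Dockerfile.
    print(generate_deployment_assets({}))
    # ['Dockerfile', 'Caddyfile', 'docker-compose.yml']
finally:
    os.chdir(old_cwd)
```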
xenfra/engine.py
ADDED
@@ -0,0 +1,264 @@
+import digitalocean
+import os
+import time
+from dotenv import load_dotenv
+from pathlib import Path
+from fabric import Connection
+
+# Internal imports
+from .utils import get_project_context
+from .recipes import generate_stack
+from .dockerizer import detect_framework, generate_deployment_assets
+
+
+class InfraEngine:
+    """
+    The Core SDK.
+    This class handles all communication with DigitalOcean.
+    It is designed to be used by a CLI or an AI Agent.
+    """
+
+    def __init__(self):
+        # Load secrets immediately
+        load_dotenv()
+        self.token = os.getenv("DIGITAL_OCEAN_TOKEN")
+
+        if not self.token:
+            raise ValueError("❌ FATAL: No DIGITAL_OCEAN_TOKEN found in .env or environment.")
+
+        self.manager = digitalocean.Manager(token=self.token)
+
+    def get_user_info(self):
+        """Returns the current user's email and account status."""
+        try:
+            account = self.manager.get_account()
+            return {"email": account.email, "status": account.status, "limit": account.droplet_limit}
+        except Exception as e:
+            raise ConnectionError(f"Could not connect to DigitalOcean: {e}")
+
+    def _ensure_ssh_key(self):
+        """
+        Private method.
+        1. Dynamically finds local public SSH keys in ~/.ssh/.
+        2. Checks if any of them exist on DigitalOcean.
+        3. If not, uploads the first one found.
+        Returns: The SSHKey object.
+        """
+        # 1. Find Local Keys
+        ssh_dir = Path.home() / ".ssh"
+        pub_keys = list(ssh_dir.glob("*.pub"))
+
+        if not pub_keys:
+            raise FileNotFoundError(f"No public SSH keys found in {ssh_dir}. Run 'ssh-keygen' first.")
+
+        # 2. Check Remote Keys against Local Keys
+        remote_keys = self.manager.get_all_sshkeys()
+        remote_key_map = {k.public_key.strip(): k for k in remote_keys}
+
+        first_local_key_content = None
+        for key_path in pub_keys:
+            local_pub_key = key_path.read_text().strip()
+            if not first_local_key_content:
+                first_local_key_content = local_pub_key
+
+            if local_pub_key in remote_key_map:
+                print(f"✨ Found existing SSH key on DigitalOcean: {key_path.name}")
+                return remote_key_map[local_pub_key]
+
+        # 3. If no matches, upload the first key found
+        if not first_local_key_content:
+            raise FileNotFoundError(f"Could not read any public keys in {ssh_dir}.")
+
+        print(f"✨ No matching SSH key found on DigitalOcean. Uploading new key from {pub_keys[0].name}...")
+        new_key = digitalocean.SSHKey(token=self.token,
+                                      name=f"xenfra-{pub_keys[0].name}-{int(time.time())}",
+                                      public_key=first_local_key_content)
+        new_key.create()
+        return new_key
+
+    def _get_connection(self, ip_address: str):
+        """Creates a standardized Fabric connection."""
+        return Connection(
+            host=ip_address,
+            user="root",
+            connect_kwargs={
+                "timeout": 10
+            }
+        )
+
+    def _is_setup_complete(self, ip_address: str) -> bool:
+        """Checks for the setup completion marker file on the remote server."""
+        try:
+            c = self._get_connection(ip_address)
+            if c.run("test -f /root/setup_complete", warn=True, hide=True).ok:
+                return True
+        except Exception:
+            # This can happen if the server is not ready for SSH
+            return False
+        return False
+
+    def _auto_heal_server(self, ip_address: str):
+        """
+        Diagnoses issues and attempts to fix them remotely.
+        This is the 'nuclear option' for a stalled setup.
+        """
+        print(f"\n[ENGINE] 🚑 INITIATING AUTO-HEAL PROTOCOL for {ip_address}...")
+        c = self._get_connection(ip_address)
+
+        # A. STOP SERVICES (Prevent Respawn)
+        print("[ENGINE] 🛑 Stopping background update services...")
+        c.run("systemctl stop unattended-upgrades.service", warn=True, hide=True)
+        c.run("systemctl stop apt-daily.service", warn=True, hide=True)
+        c.run("systemctl stop apt-daily-upgrade.service", warn=True, hide=True)
+
+        # B. KILL PROCESSES
+        print("[ENGINE] 🔪 Killing stuck processes...")
+        c.run("pkill apt", warn=True, hide=True)
+        c.run("pkill apt-get", warn=True, hide=True)
+        c.run("pkill dpkg", warn=True, hide=True)
+
+        # C. REMOVE LOCKS
+        print("[ENGINE] PWN: Removing lock files...")
+        c.run("rm -f /var/lib/dpkg/lock*", warn=True, hide=True)
+        c.run("rm -f /var/lib/apt/lists/lock", warn=True, hide=True)
+        c.run("rm -f /var/cache/apt/archives/lock", warn=True, hide=True)
+
+        # D. REPAIR DPKG
+        print("[ENGINE] 🔧 Repairing package database...")
+        c.run("dpkg --configure -a", warn=True, hide=True)
+
+        # E. RE-MARK AS COMPLETE (to unblock the health check)
+        c.run("touch /root/setup_complete", warn=True, hide=True)
+        print("[ENGINE] ✅ Auto-Heal Complete.")
+
+    def upload_code(self, ip_address: str) -> bool:
+        """Uses Fabric to upload project files to the /root/app directory."""
+        print(f"\n[ENGINE] 🚁 INITIATING CODE AIRLIFT to {ip_address}...")
+
+        ignored = {'.git', '.venv', 'venv', '__pycache__', '.pytest_cache', '.DS_Store', '.env'}
+
+        try:
+            c = self._get_connection(ip_address)
+
+            print("[ENGINE] 📤 Uploading Project Files...")
+            count = 0
+            for root, dirs, files in os.walk("."):
+                dirs[:] = [d for d in dirs if d not in ignored]
+
+                for file in files:
+                    if file.endswith(".pyc"): continue
+
+                    local_path = os.path.join(root, file)
+                    rel_path = os.path.relpath(local_path, ".")
+                    remote_path = f"/root/app/{rel_path}"
+
+                    remote_dir = os.path.dirname(remote_path)
+                    c.run(f"mkdir -p {remote_dir}", hide=True)
+
+                    c.put(local_path, remote_path)
+                    count += 1
+
+            print(f"[ENGINE] ✅ Airlift Complete: {count} files transferred.")
+            return True
+
+        except Exception as e:
+            print(f"[ENGINE] ❌ Upload failed: {e}")
+            print("[ENGINE] 🔑 Ensure your SSH Agent is running and keys are added.")
+            return False
+
+    def list_servers(self):
+        """Returns a list of all active servers."""
+        return self.manager.get_all_droplets()
+
+    def deploy_server(self, name, region="blr1", size="s-1vcpu-1gb", image="ubuntu-24-04-x64", logger=None):
+        """
+        Provisions a new server, runs setup, and deploys the application.
+        This is a stateful, blocking method that orchestrates the entire process.
+        """
+        def log(msg):
+            if logger:
+                logger(f"[ENGINE] {msg}")
+            else:
+                print(f"[ENGINE] {msg}")
+
+        log(f"🚀 Starting deployment for '{name}'...")
+
+        try:
+            # 1. Project Context & Asset Generation
+            log("🔍 Detecting project context...")
+            context = get_project_context()
+            framework, _, _ = detect_framework()
+            is_dockerized = False
+            if framework:
+                log(f"✨ Web framework '{framework}' detected.")
+                generate_deployment_assets(context)
+                is_dockerized = True
+
+            # 2. Get SSH Key
+            ssh_key = self._ensure_ssh_key()
+
+            # 3. Generate Setup Script
+            log("📦 Generating cloud-init setup script...")
+            setup_script = generate_stack(context, is_dockerized=is_dockerized)
+
+            # 4. Create Droplet
+            log(f"☁️ Provisioning droplet '{name}'...")
+            droplet = digitalocean.Droplet(token=self.token,
+                                           name=name,
+                                           region=region,
+                                           image=image,
+                                           size_slug=size,
+                                           ssh_keys=[ssh_key],
+                                           user_data=setup_script)
+            droplet.create()
+
+            # 5. Wait for Droplet to become active and get IP
+            log("⏳ Waiting for droplet to become active and get IP address...")
+            while not droplet.ip_address:
+                droplet.load()
+                time.sleep(5)
+            log(f"🌐 Droplet is active at {droplet.ip_address}")
+
+            # 6. Health Check & Auto-Heal Loop
+            log("🩺 Performing health checks on the new server...")
+            attempts = 0
+            max_retries = 18  # ~90 seconds
+            is_healthy = False
+            while attempts < max_retries:
+                if self._is_setup_complete(droplet.ip_address):
+                    log("✅ Server setup complete and healthy.")
+                    is_healthy = True
+                    break
+                log(f" - Still waiting for setup to complete... (attempt {attempts+1}/{max_retries})")
+                time.sleep(5)
+                attempts += 1
+
+            if not is_healthy:
+                log("⚠️ Server setup stalled. Initiating auto-heal protocol...")
+                self._auto_heal_server(droplet.ip_address)
+                if not self._is_setup_complete(droplet.ip_address):
+                    raise Exception("Deployment failed: Auto-heal could not recover the server.")
+                log("✅ Server recovered successfully after auto-heal.")
+
+            # 7. Upload Code
+            self.upload_code(droplet.ip_address)
+
+            # 8. Run Docker Compose if applicable
+            if is_dockerized:
+                log("🚀 Starting application with Docker Compose...")
+                c = self._get_connection(droplet.ip_address)
+                c.run("cd /root/app && docker-compose up -d", hide=True)
+
+            log("🎉 Deployment successful!")
+            return {"status": "success", "ip": droplet.ip_address, "name": name}
+
+        except Exception as e:
+            log(f"❌ CRITICAL DEPLOYMENT FAILURE: {e}")
+            # Optionally, trigger a cleanup action here
+            raise e
+
+    def destroy_server(self, droplet_id):
+        """Destroys a server by ID. IRREVERSIBLE."""
+        droplet = self.manager.get_droplet(droplet_id)
+        droplet.destroy()
+        return True
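The README further down shows `deploy_server`; as a complementary sketch, listing and destroying droplets through the same engine (the droplet ID is a placeholder):

```python
from xenfra.engine import InfraEngine

# Requires DIGITAL_OCEAN_TOKEN in the environment or a local .env file.
engine = InfraEngine()

# list_servers() returns python-digitalocean Droplet objects,
# the same attributes the CLI's list_menu() renders.
for droplet in engine.list_servers():
    print(droplet.id, droplet.name, droplet.ip_address, droplet.status)

# Destruction is immediate and IRREVERSIBLE; 123456789 is illustrative.
# engine.destroy_server(123456789)
```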
xenfra/recipes.py
ADDED
@@ -0,0 +1,115 @@
+def generate_stack(context, is_dockerized=False):
+    """
+    Generates a startup script.
+    INCLUDES 'AGGRESSIVE MODE' to kill background updates.
+    """
+    project_type = context.get("type", "pip")
+    packages = context.get("packages", "")
+
+    script = """#!/bin/bash
+export DEBIAN_FRONTEND=noninteractive
+LOG="/root/setup.log"
+touch $LOG
+
+echo "--------------------------------" >> $LOG
+echo "🧘 XENFRA: Context-Aware Boot" >> $LOG
+echo "--------------------------------" >> $LOG
+
+# Create App Directory
+mkdir -p /root/app
+cd /root/app
+
+# --- AGGRESSIVE FIX: KILL BACKGROUND UPDATES ---
+echo "⚙️ [0/6] Stopping Background Updates..." >> $LOG
+systemctl stop unattended-upgrades.service
+systemctl stop apt-daily.service
+systemctl stop apt-daily-upgrade.service
+systemctl kill --kill-who=all apt-daily.service
+systemctl kill --kill-who=all apt-daily-upgrade.service
+
+# Force remove locks if they exist (The Nuclear Option)
+rm -f /var/lib/dpkg/lock*
+rm -f /var/lib/apt/lists/lock
+rm -f /var/cache/apt/archives/lock
+dpkg --configure -a
+# -----------------------------------------------
+
+# 1. System Updates
+echo "🔄 [1/6] Refreshing Package Lists..." >> $LOG
+apt-get update
+apt-get install -y python3-pip python3-venv git curl
+
+# 2. Install Docker & Compose
+echo "🐳 [2/6] Installing Docker..." >> $LOG
+apt-get install -y docker.io || (curl -fsSL https://get.docker.com | sh)
+echo "🐶 [3/6] Installing Docker Compose..." >> $LOG
+apt-get install -y docker-compose-v2
+"""
+
+    if is_dockerized:
+        script += """
+# --- DOCKERIZED DEPLOYMENT ---
+echo "📦 [4/6] Installing Caddy..." >> $LOG
+apt-get install -y debian-keyring debian-archive-keyring apt-transport-https
+curl -LsSf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
+curl -LsSf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-stable.list
+apt-get update
+apt-get install -y caddy
+
+echo "🚀 [5/6] Starting application with Docker Compose..." >> $LOG
+cd /root/app
+docker-compose -f /root/app/docker-compose.yml up -d
+
+echo "🔄 [6/6] Reloading Caddy..." >> $LOG
+# Caddy will automatically use the Caddyfile from the mounted volume in docker-compose
+# We just need to make sure it's running. Docker-compose handles this.
+# A system reload might be useful if Caddy was installed as a host service.
+systemctl reload caddy || systemctl start caddy
+"""
+    else:
+        # 3. Virtual Environment Setup (Non-Dockerized)
+        if project_type == "uv":
+            script += """
+# --- STANDARD NON-DOCKER DEPLOYMENT (UV) ---
+echo "⚡ [4/6] Installing UV..." >> $LOG
+curl -LsSf https://astral.sh/uv/install.sh | sh
+source $HOME/.local/bin/env
+export PATH="/root/.local/bin:$PATH"
+
+echo "⚡ [5/6] Creating UV Venv..." >> $LOG
+uv venv /root/venv
+source /root/venv/bin/activate
+"""
+        else:
+            script += """
+# --- STANDARD NON-DOCKER DEPLOYMENT (PIP) ---
+echo "🐍 [4/6] Creating Python Venv..." >> $LOG
+python3 -m venv /root/venv
+source /root/venv/bin/activate
+echo "🐍 [5/6] Upgrading Pip..." >> $LOG
+pip install --upgrade pip
+"""
+        # 4. Install Packages (Non-Dockerized)
+        if packages:
+            if project_type == "uv":
+                script += f"""
+echo "📦 [6/6] UV Installing Deps: {packages}..." >> $LOG
+source /root/venv/bin/activate
+uv pip install {packages}
+"""
+            else:
+                script += f"""
+echo "📦 [6/6] Pip Installing Deps: {packages}..." >> $LOG
+source /root/venv/bin/activate
+pip install {packages}
+"""
+        else:
+            script += 'echo "ℹ️ [6/6] No packages detected." >> $LOG\n'
+
+    # Finish
+    script += """
+
+echo "✅ SETUP COMPLETE" >> $LOG
+touch /root/setup_complete
+"""
+    return script
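A quick sketch of how this generator slots into the deploy flow: the returned string is plain bash, handed to the droplet as cloud-init `user_data` (see `deploy_server` above), and the marker file it touches at the end is what `_is_setup_complete` polls for:

```python
from xenfra.recipes import generate_stack
from xenfra.utils import get_project_context

# Build the startup script for the current project directory.
context = get_project_context()
script = generate_stack(context, is_dockerized=False)

print(script.splitlines()[0])                    # '#!/bin/bash'
assert "touch /root/setup_complete" in script    # the health-check marker
```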
xenfra/utils.py
ADDED
@@ -0,0 +1,69 @@
+import os
+import tomllib  # Python 3.11+
+
+def get_project_context():
+    """
+    Scans for project configuration.
+    Prioritizes: pyproject.toml > requirements.txt
+    Prioritizes: uv > poetry > pip
+    """
+    options = {}
+
+    # 1. Check for Lockfiles (Determines the Manager)
+    if os.path.exists("uv.lock"):
+        options["uv"] = True
+    if os.path.exists("poetry.lock"):
+        options["poetry"] = True
+
+    # 2. Extract Dependencies from pyproject.toml (Best source for names)
+    if os.path.exists("pyproject.toml"):
+        try:
+            with open("pyproject.toml", "rb") as f:
+                data = tomllib.load(f)
+            deps = data.get("project", {}).get("dependencies", [])
+            if deps:
+                # Clean versions if needed, or just pass them to pip/uv
+                options["toml"] = " ".join(deps)
+        except Exception:
+            pass
+
+    # 3. Extract Dependencies from requirements.txt (Fallback source)
+    if os.path.exists("requirements.txt"):
+        try:
+            with open("requirements.txt", "r") as f:
+                pkgs = [l.strip() for l in f if l.strip() and not l.startswith("#")]
+            if pkgs:
+                options["pip"] = " ".join(pkgs)
+        except Exception:
+            pass
+
+    # --- DECISION LOGIC ---
+    context = {"type": "pip", "packages": None, "conflict": False}
+
+    # A. Determine the Manager (Type)
+    if "uv" in options:
+        context["type"] = "uv"
+    elif "poetry" in options:
+        context["type"] = "poetry"
+    else:
+        context["type"] = "pip"
+
+    # B. Determine the Packages (Content)
+    # We prefer TOML because it's the modern standard usually paired with UV/Poetry
+    if "toml" in options:
+        context["packages"] = options["toml"]
+    elif "pip" in options:
+        context["packages"] = options["pip"]
+
+    # C. Check for "True" Conflicts
+    # A conflict is when we have ambiguous package sources (TOML vs Requirements)
+    # AND we aren't sure which one the user wants.
+    if "toml" in options and "pip" in options:
+        # If we have both, we flag it so the UI can ask (or default to TOML)
+        context["conflict"] = True
+        context["choices"] = {
+            "1": {"name": "pyproject.toml", "pkgs": options["toml"]},
+            "2": {"name": "requirements.txt", "pkgs": options["pip"]}
+        }
+
+    return context
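As a sketch of the returned shape for a project that ships a `uv.lock` plus both dependency files (the package names shown are illustrative):

```python
from xenfra.utils import get_project_context

# Run from the project root; the scanner only looks at the current directory.
context = get_project_context()

# Expect something like:
# {
#     "type": "uv",                    # uv.lock wins over poetry.lock / pip
#     "packages": "fastapi uvicorn",   # joined from pyproject.toml deps
#     "conflict": True,                # both TOML and requirements.txt found
#     "choices": {
#         "1": {"name": "pyproject.toml", "pkgs": "..."},
#         "2": {"name": "requirements.txt", "pkgs": "..."},
#     },
# }
print(context)
```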
xenfra-0.1.0.dist-info/METADATA
ADDED
@@ -0,0 +1,128 @@
+Metadata-Version: 2.3
+Name: xenfra
+Version: 0.1.0
+Summary: A 'Zen Mode' infrastructure engine for Python developers.
+Author: xenfra-cloud
+Author-email: xenfra-cloud <xenfracloud@gmail.com>
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Topic :: System :: Systems Administration
+Requires-Dist: fabric>=3.2.2
+Requires-Dist: python-digitalocean>=1.17.0
+Requires-Dist: python-dotenv>=1.2.1
+Requires-Dist: rich>=14.2.0
+Requires-Dist: pytest>=8.0.0 ; extra == 'test'
+Requires-Dist: pytest-mock>=3.12.0 ; extra == 'test'
+Requires-Python: >=3.13
+Project-URL: Homepage, https://github.com/xenfra-cloud/xenfra
+Project-URL: Issues, https://github.com/xenfra-cloud/xenfra/issues
+Provides-Extra: test
+Description-Content-Type: text/markdown
+
+# 🧘 Xenfra: Infrastructure in Zen Mode
+
+**Xenfra** is a modular infrastructure engine for Python developers that automates the deployment of applications to DigitalOcean. It is designed as a library first, with a beautiful and interactive CLI as the default frontend.
+
+It handles the complexity of server provisioning, context-aware configuration, Dockerization, and automatic HTTPS, allowing you to focus on your code.
+
+## ✨ Core Philosophy
+
+* **Engine as the Brain**: `xenfra.engine` is the core library. It owns the DigitalOcean API, the SSH "Auto-Heal" retry loops, and the Dockerizer services. It is stateful, robust, and can be imported into any Python project.
+* **Clients as the Face**: Frontends like the default CLI (`xenfra.cli`) are thin, stateless clients responsible only for user interaction.
+* **Zen Mode**: If a server setup fails due to common issues like a locked package manager, the Engine automatically fixes it without exposing raw errors to the user.
+
+## 🚀 Quickstart: As a Library
+
+This demonstrates the power of Xenfra's engine for programmatic infrastructure management.
+
+### 1. Installation
+
+```bash
+# Install from PyPI (once published)
+pip install xenfra
+```
+
+### 2. Prerequisites
+
+Ensure your DigitalOcean API token is available as an environment variable:
+
+```bash
+export DIGITAL_OCEAN_TOKEN="your_secret_token_here"
+```
+
+### 3. Programmatic Usage
+
+```python
+from xenfra.engine import InfraEngine
+
+# Optional: a logger to receive status updates
+def my_logger(message):
+    print(f"[My App] {message}")
+
+try:
+    # The engine automatically finds and validates the API token
+    engine = InfraEngine()
+
+    # Define the server and deploy
+    result = engine.deploy_server(
+        name="my-app-server",
+        region="nyc3",
+        size="s-1vcpu-1gb",
+        image="ubuntu-24-04-x64",
+        logger=my_logger
+    )
+
+    print(f"🎉 Deployment successful! IP Address: {result.get('ip')}")
+
+except Exception as e:
+    print(f"❌ Deployment failed: {e}")
+```
+
+## 💻 CLI Usage
+
+Xenfra also provides a beautiful, interactive CLI for manual control.
+
+### 1. Installation
+
+```bash
+pip install xenfra
+```
+
+### 2. Configuration
+
+Create a `.env` file in your project root with your DigitalOcean token:
+
+```env
+DIGITAL_OCEAN_TOKEN=dop_v1_your_token_here
+```
+
+### 3. Run the CLI
+
+Once installed, simply run the `xenfra` command:
+
+```bash
+xenfra
+```
+
+This will launch the interactive menu where you can:
+- **🚀 Deploy New Server**: A guided workflow to provision and deploy your application.
+- **📋 List Active Servers**: View your current DigitalOcean droplets.
+- **🧨 Destroy a Server**: Decommission servers you no longer need.
+
+## 📦 Supported Frameworks & Features
+
+* **Smart Context Detection**: Automatically detects your package manager (`uv` or `pip`).
+* **Automatic Dockerization**: If a web framework is detected (`FastAPI`, `Flask`), Xenfra will:
+  * Generate a `Dockerfile`, `docker-compose.yml`, and `Caddyfile`.
+  * Deploy your application as a container.
+  * Configure **Caddy** as a reverse proxy with **automatic HTTPS**.
+
+## 🤝 Contributing
+
+Contributions are welcome! Please check our `CONTRIBUTING.md` for more details.
+
+## 🚀 Created by DevHusnainAi
xenfra-0.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,10 @@
+xenfra/__init__.py,sha256=ja0-Vc9T61EXZnPeX84Fi-UOjoGAWu5r90_1t-0WjVw,46
+xenfra/cli.py,sha256=EXhzv5Zd5zNuON-X2mp00SE956_x0stzNUAN4X0tg7w,5840
+xenfra/dockerizer.py,sha256=D-rApduF9ZWuJBh56WPWGxM95o6xxjhhVIxKnwYzF0k,3182
+xenfra/engine.py,sha256=A7Ov7_ub-24kfLjkuFZdwzu-AgIbTUpPCvx0t35d_e4,10636
+xenfra/recipes.py,sha256=rf6dqrbc9P-LPMDQMt2iL100kVMhbWJRgfGYKJ5gqBQ,3780
+xenfra/utils.py,sha256=aGXjJm-pwVCHuCn5UBdrxRcYvM8aJwHQ1kihl7gcxiM,2387
+xenfra-0.1.0.dist-info/WHEEL,sha256=ZyFSCYkV2BrxH6-HRVRg3R9Fo7MALzer9KiPYqNxSbo,79
+xenfra-0.1.0.dist-info/entry_points.txt,sha256=pLGDlV0SH2hWunZJcEY7UkLLleMExFBaxgxRKYhU9mw,44
+xenfra-0.1.0.dist-info/METADATA,sha256=LrR6Sr0UNzAZPZFJJhWscaQR88w0B0HGg3cAtkSXYLU,4222
+xenfra-0.1.0.dist-info/RECORD,,