xenfra-sdk 0.1.2__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra_sdk/__init__.py +21 -21
- xenfra_sdk/cli/main.py +226 -226
- xenfra_sdk/config.py +26 -26
- xenfra_sdk/db/models.py +24 -28
- xenfra_sdk/db/session.py +30 -30
- xenfra_sdk/dependencies.py +39 -39
- xenfra_sdk/dockerizer.py +87 -87
- xenfra_sdk/engine.py +411 -388
- xenfra_sdk/exceptions.py +19 -19
- xenfra_sdk/mcp_client.py +154 -154
- xenfra_sdk/models.py +182 -182
- xenfra_sdk/patterns.json +13 -13
- xenfra_sdk/privacy.py +153 -153
- xenfra_sdk/recipes.py +25 -25
- xenfra_sdk/resources/base.py +3 -3
- xenfra_sdk/resources/deployments.py +89 -89
- xenfra_sdk/resources/intelligence.py +95 -95
- xenfra_sdk/security.py +41 -41
- xenfra_sdk/templates/Dockerfile.j2 +25 -25
- xenfra_sdk/templates/cloud-init.sh.j2 +68 -68
- xenfra_sdk/templates/docker-compose.yml.j2 +33 -33
- {xenfra_sdk-0.1.2.dist-info → xenfra_sdk-0.1.3.dist-info}/METADATA +92 -92
- xenfra_sdk-0.1.3.dist-info/RECORD +31 -0
- {xenfra_sdk-0.1.2.dist-info → xenfra_sdk-0.1.3.dist-info}/WHEEL +1 -1
- xenfra_sdk-0.1.2.dist-info/RECORD +0 -31
xenfra_sdk/engine.py
CHANGED
@@ -1,388 +1,411 @@

Removed (0.1.2, old lines 1-388): this view renders only the old file's module header comment and import block, which are identical to the first twelve lines of the 0.1.3 version below; the rest of the removed content survives only as stray docstring quotes, a truncated logger(" call, and closing parentheses, and is not reproduced here.
Added (0.1.3, new lines 1-411):

# src/xenfra/engine.py

import os
import time
from datetime import datetime
from pathlib import Path
from typing import Optional

import digitalocean
import fabric
from dotenv import load_dotenv
from sqlmodel import Session, select

import shutil
import subprocess

# Xenfra modules
from . import dockerizer, recipes
from .db.models import Project
from .db.session import get_session


class DeploymentError(Exception):
    """Custom exception for deployment failures."""

    def __init__(self, message, stage="Unknown"):
        self.message = message
        self.stage = stage
        super().__init__(f"Deployment failed at stage '{stage}': {message}")


class InfraEngine:
    """
    The InfraEngine is the core of Xenfra. It handles all interactions
    with the cloud provider and orchestrates the deployment lifecycle.
    """

    def __init__(self, token: str = None, db_session: Session = None):
        """
        Initializes the engine and validates the API token.
        """
        load_dotenv()
        self.token = token or os.getenv("DIGITAL_OCEAN_TOKEN")
        self.db_session = db_session or next(get_session())

        if not self.token:
            raise ValueError(
                "DigitalOcean API token not found. Please set the DIGITAL_OCEAN_TOKEN environment variable."
            )
        try:
            self.manager = digitalocean.Manager(token=self.token)
            self.get_user_info()
        except Exception as e:
            raise ConnectionError(f"Failed to connect to DigitalOcean: {e}")

    def _get_connection(self, ip_address: str):
        """Establishes a Fabric connection to the server."""
        private_key_path = str(Path.home() / ".ssh" / "id_rsa")
        if not Path(private_key_path).exists():
            raise DeploymentError("No private SSH key found at ~/.ssh/id_rsa.", stage="Setup")

        return fabric.Connection(
            host=ip_address,
            user="root",
            connect_kwargs={"key_filename": [private_key_path]},
        )

    def get_user_info(self):
        """Retrieves user account information."""
        return self.manager.get_account()

    def list_servers(self):
        """Retrieves a list of all Droplets."""
        return self.manager.get_all_droplets()

    def destroy_server(self, droplet_id: int, db_session: Session = None):
        """Destroys a Droplet by its ID and removes it from the local DB."""
        session = db_session or self.db_session

        # Find the project in the local DB
        statement = select(Project).where(Project.droplet_id == droplet_id)
        project_to_delete = session.exec(statement).first()

        # Destroy the droplet on DigitalOcean
        droplet = digitalocean.Droplet(token=self.token, id=droplet_id)
        droplet.destroy()

        # If it was in our DB, delete it
        if project_to_delete:
            session.delete(project_to_delete)
            session.commit()

    def list_projects_from_db(self, db_session: Session = None):
        """Lists all projects from the local database."""
        session = db_session or self.db_session
        statement = select(Project)
        return session.exec(statement).all()

    def sync_with_provider(self, db_session: Session = None):
        """Reconciles the local database with the live state from DigitalOcean."""
        session = db_session or self.db_session

        # 1. Get live and local states
        live_droplets = self.manager.get_all_droplets(tag_name="xenfra")
        local_projects = self.list_projects_from_db(session)

        live_map = {d.id: d for d in live_droplets}
        local_map = {p.droplet_id: p for p in local_projects}

        # 2. Reconcile
        # Add new servers found on DO to our DB
        for droplet_id, droplet in live_map.items():
            if droplet_id not in local_map:
                new_project = Project(
                    droplet_id=droplet.id,
                    name=droplet.name,
                    ip_address=droplet.ip_address,
                    status=droplet.status,
                    region=droplet.region["slug"],
                    size=droplet.size_slug,
                )
                session.add(new_project)

        # Remove servers from our DB that no longer exist on DO
        for project_id, project in local_map.items():
            if project_id not in live_map:
                session.delete(project)

        session.commit()
        return self.list_projects_from_db(session)

    def stream_logs(self, droplet_id: int, db_session: Session = None):
        """
        Verifies a server exists and streams its logs in real-time.
        """
        session = db_session or self.db_session

        # 1. Find project in local DB
        statement = select(Project).where(Project.droplet_id == droplet_id)
        project = session.exec(statement).first()
        if not project:
            raise DeploymentError(
                f"Project with Droplet ID {droplet_id} not found in local database.",
                stage="Log Streaming",
            )

        # 2. Just-in-Time Verification
        try:
            droplet = self.manager.get_droplet(droplet_id)
        except digitalocean.baseapi.DataReadError as e:
            if e.response.status_code == 404:
                # The droplet doesn't exist, so remove it from our DB
                session.delete(project)
                session.commit()
                raise DeploymentError(
                    f"Server '{project.name}' (ID: {droplet_id}) no longer exists on DigitalOcean. It has been removed from your local list.",
                    stage="Log Streaming",
                )
            else:
                raise e

        # 3. Stream logs
        ip_address = droplet.ip_address
        with self._get_connection(ip_address) as conn:
            conn.run("cd /root/app && docker-compose logs -f app", pty=True)

    def get_account_balance(self) -> dict:
        """
        Retrieves the current account balance from DigitalOcean.
        Placeholder: Actual implementation needed.
        """
        # In a real scenario, this would call the DigitalOcean API for billing info
        # For now, return mock data
        return {
            "month_to_date_balance": "0.00",
            "account_balance": "0.00",
            "month_to_date_usage": "0.00",
            "generated_at": datetime.now().isoformat(),
        }

    def get_droplet_cost_estimates(self) -> list:
        """
        Retrieves a list of Xenfra-managed DigitalOcean droplets with their estimated monthly costs.
        Placeholder: Actual implementation needed.
        """
        # In a real scenario, this would list droplets and calculate costs
        # For now, return mock data
        return []

    def _ensure_ssh_key(self, logger):
        """Ensures a local public SSH key is on DigitalOcean. Generates one if missing (Zen Mode)."""
        pub_key_path = Path.home() / ".ssh" / "id_rsa.pub"
        priv_key_path = Path.home() / ".ssh" / "id_rsa"

        if not pub_key_path.exists():
            logger(" - [Zen Mode] No SSH key found at ~/.ssh/id_rsa.pub. Generating a new one...")
            try:
                # Ensure .ssh directory exists
                pub_key_path.parent.mkdir(parents=True, exist_ok=True)

                # Generate RSA keypair without passphrase
                subprocess.run(
                    ["ssh-keygen", "-t", "rsa", "-b", "4096", "-N", "", "-f", str(priv_key_path)],
                    check=True,
                    capture_output=True,
                )
                logger(" - [Zen Mode] Successfully generated SSH keypair.")
            except Exception as e:
                logger(f" - [ERROR] Failed to generate SSH key: {e}")
                raise DeploymentError(
                    f"Could not find or generate SSH key: {e}", stage="Setup"
                )

        with open(pub_key_path) as f:
            pub_key_content = f.read()

        # Check if the key is already on DigitalOcean
        existing_keys = self.manager.get_all_sshkeys()
        for key in existing_keys:
            if key.public_key.strip() == pub_key_content.strip():
                logger(" - Found existing SSH key on DigitalOcean.")
                return key

        logger(" - No matching SSH key found on provider. Registering new key...")
        # Use a descriptive name including hostname if possible
        import socket
        key_name = f"xenfra-key-{socket.gethostname()}"
        key = digitalocean.SSHKey(
            token=self.token, name=key_name, public_key=pub_key_content
        )
        key.create()
        return key

    def deploy_server(
        self,
        name: str,
        region: str = "nyc3",
        size: str = "s-1vcpu-1gb",
        image: str = "ubuntu-22-04-x64",
        logger: Optional[callable] = None,
        user_id: Optional[int] = None,
        email: Optional[str] = None,
        domain: Optional[str] = None,
        repo_url: Optional[str] = None,
        db_session: Session = None,
        **kwargs,
    ):
        """A stateful, blocking orchestrator for deploying a new server."""
        droplet = None
        session = db_session or self.db_session
        try:
            # === 1. SETUP STAGE ===
            logger("\n[bold blue]PHASE 1: SETUP[/bold blue]")
            ssh_key = self._ensure_ssh_key(logger)

            # === 2. ASSET GENERATION STAGE ===
            logger("\n[bold blue]PHASE 2: GENERATING DEPLOYMENT ASSETS[/bold blue]")
            context = {
                "email": email,
                "domain": domain,
                "repo_url": repo_url,
                **kwargs,  # Pass db config, etc.
            }
            files = dockerizer.generate_templated_assets(context)
            for file in files:
                logger(f" - Generated {file}")

            # === 3. CLOUD-INIT STAGE ===
            logger("\n[bold blue]PHASE 3: CREATING SERVER SETUP SCRIPT[/bold blue]")
            cloud_init_script = recipes.generate_stack(context)
            logger(" - Generated cloud-init script.")
            logger(
                f"--- Cloud-init script content ---\n{cloud_init_script}\n---------------------------------"
            )

            # === 4. DROPLET CREATION STAGE ===
            logger("\n[bold blue]PHASE 4: PROVISIONING SERVER[/bold blue]")
            droplet = digitalocean.Droplet(
                token=self.token,
                name=name,
                region=region,
                image=image,
                size_slug=size,
                ssh_keys=[ssh_key],
                userdata=cloud_init_script,
                tags=["xenfra"],
            )
            droplet.create()
            logger(
                f" - Droplet '{name}' creation initiated (ID: {droplet.id}). Waiting for it to become active..."
            )

            # === 5. POLLING STAGE ===
            logger("\n[bold blue]PHASE 5: WAITING FOR SERVER SETUP[/bold blue]")
            while True:
                droplet.load()
                if droplet.status == "active":
                    logger(" - Droplet is active. Waiting for SSH to be available...")
                    break
                time.sleep(10)

            ip_address = droplet.ip_address

            # Retry SSH connection
            conn = None
            max_retries = 12  # 2-minute timeout for SSH
            for i in range(max_retries):
                try:
                    logger(f" - Attempting SSH connection ({i + 1}/{max_retries})...")
                    conn = self._get_connection(ip_address)
                    conn.open()  # Explicitly open the connection
                    logger(" - SSH connection established.")
                    break
                except Exception as e:
                    if i < max_retries - 1:
                        logger(" - SSH connection failed. Retrying in 10s...")
                        time.sleep(10)
                    else:
                        raise DeploymentError(
                            f"Failed to establish SSH connection: {e}", stage="Polling"
                        )

            if not conn or not conn.is_connected:
                raise DeploymentError("Could not establish SSH connection.", stage="Polling")

            with conn:
                for i in range(30):  # 5-minute timeout for cloud-init
                    if conn.run("test -f /root/setup_complete", warn=True).ok:
                        logger(" - Cloud-init setup complete.")
                        break
                    time.sleep(10)
                else:
                    raise DeploymentError(
                        "Server setup script failed to complete in time.", stage="Polling"
                    )

            # === 6. CODE UPLOAD STAGE ===
            logger("\n[bold blue]PHASE 6: UPLOADING APPLICATION CODE[/bold blue]")
            with self._get_connection(ip_address) as conn:
                # If repo_url is provided, clone it instead of uploading local code
                if repo_url:
                    logger(f" - Cloning repository from {repo_url}...")
                    conn.run(f"git clone {repo_url} /root/app")
                else:
                    fabric.transfer.Transfer(conn).upload(
                        ".", "/root/app", exclude=[".git", ".venv", "__pycache__"]
                    )
                logger(" - Code upload complete.")

            # === 7. FINAL DEPLOY STAGE ===
            logger("\n[bold blue]PHASE 7: BUILDING AND DEPLOYING CONTAINERS[/bold blue]")
            with self._get_connection(ip_address) as conn:
                result = conn.run("cd /root/app && docker-compose up -d --build", hide=True)
                if result.failed:
                    raise DeploymentError(f"docker-compose failed: {result.stderr}", stage="Deploy")
                logger(" - Docker containers are building in the background...")

            # === 8. VERIFICATION STAGE ===
            logger("\n[bold blue]PHASE 8: VERIFYING DEPLOYMENT[/bold blue]")
            app_port = context.get("port", 8000)
            for i in range(24):  # 2-minute timeout for health checks
                logger(f" - Health check attempt {i + 1}/24...")
                with self._get_connection(ip_address) as conn:
                    # Check if container is running
                    ps_result = conn.run("cd /root/app && docker-compose ps", hide=True)
                    if "running" not in ps_result.stdout:
                        time.sleep(5)
                        continue

                    # Check if application is responsive
                    curl_result = conn.run(
                        f"curl -s --fail http://localhost:{app_port}/", warn=True
                    )
                    if curl_result.ok:
                        logger(
                            "[bold green] - Health check passed! Application is live.[/bold green]"
                        )

                        # === 9. PERSISTENCE STAGE ===
                        logger("\n[bold blue]PHASE 9: SAVING DEPLOYMENT TO DATABASE[/bold blue]")
                        project = Project(
                            droplet_id=droplet.id,
                            name=droplet.name,
                            ip_address=ip_address,
                            status=droplet.status,
                            region=droplet.region["slug"],
                            size=droplet.size_slug,
                            user_id=user_id,  # Save the user_id
                        )
                        session.add(project)
                        session.commit()
                        logger(" - Deployment saved.")

                        return droplet  # Return the full droplet object
                time.sleep(5)
            else:
                # On failure, get logs and destroy droplet
                with self._get_connection(ip_address) as conn:
                    logs = conn.run("cd /root/app && docker-compose logs", hide=True).stdout
                raise DeploymentError(
                    f"Application failed to become healthy in time. Logs:\n{logs}",
                    stage="Verification",
                )

        except Exception as e:
            if droplet:
                logger(
                    f"[bold red]Deployment failed. The server '{droplet.name}' will NOT be cleaned up for debugging purposes.[/bold red]"
                )
                # droplet.destroy()  # Commented out for debugging
            raise e
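
For orientation, the sketch below shows how the 0.1.3 InfraEngine API rendered above might be driven from user code. It is illustrative only: the import path, the example repository URL, and the use of print as the logger are assumptions, not part of the package diff; deploy_server calls its logger unconditionally, so some callable must be supplied.

# Hypothetical usage sketch (not part of the diff). Assumes DIGITAL_OCEAN_TOKEN is
# set in the environment or a .env file, and that the public import path is
# xenfra_sdk.engine (the wheel ships the module as xenfra_sdk/engine.py).
from xenfra_sdk.engine import DeploymentError, InfraEngine

engine = InfraEngine()  # token resolved via load_dotenv()/os.getenv in __init__

try:
    droplet = engine.deploy_server(
        name="demo-api",
        region="nyc3",
        size="s-1vcpu-1gb",
        repo_url="https://github.com/example/demo-api",  # example URL; cloned on the server in PHASE 6
        logger=print,  # every deployment phase is reported through this callable
    )
    print(f"Deployed '{droplet.name}' at {droplet.ip_address}")
except DeploymentError as err:
    print(f"Deployment failed at stage '{err.stage}': {err.message}")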