xenfra-sdk 0.2.1-py3-none-any.whl → 0.2.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra_sdk/__init__.py +61 -21
- xenfra_sdk/cli/main.py +226 -226
- xenfra_sdk/client.py +3 -0
- xenfra_sdk/config.py +26 -26
- xenfra_sdk/db/models.py +24 -24
- xenfra_sdk/db/session.py +30 -30
- xenfra_sdk/dependencies.py +39 -39
- xenfra_sdk/detection.py +396 -0
- xenfra_sdk/dockerizer.py +195 -104
- xenfra_sdk/engine.py +741 -471
- xenfra_sdk/exceptions.py +19 -19
- xenfra_sdk/manifest.py +212 -0
- xenfra_sdk/mcp_client.py +154 -154
- xenfra_sdk/models.py +184 -183
- xenfra_sdk/orchestrator.py +666 -0
- xenfra_sdk/patterns.json +13 -13
- xenfra_sdk/privacy.py +153 -153
- xenfra_sdk/recipes.py +26 -26
- xenfra_sdk/resources/base.py +3 -3
- xenfra_sdk/resources/deployments.py +278 -235
- xenfra_sdk/resources/files.py +101 -0
- xenfra_sdk/resources/intelligence.py +102 -95
- xenfra_sdk/security.py +41 -41
- xenfra_sdk/security_scanner.py +431 -0
- xenfra_sdk/templates/Caddyfile.j2 +14 -0
- xenfra_sdk/templates/Dockerfile.j2 +41 -25
- xenfra_sdk/templates/cloud-init.sh.j2 +90 -90
- xenfra_sdk/templates/docker-compose-multi.yml.j2 +29 -0
- xenfra_sdk/templates/docker-compose.yml.j2 +30 -27
- xenfra_sdk-0.2.3.dist-info/METADATA +116 -0
- xenfra_sdk-0.2.3.dist-info/RECORD +38 -0
- {xenfra_sdk-0.2.1.dist-info → xenfra_sdk-0.2.3.dist-info}/WHEEL +2 -2
- xenfra_sdk-0.2.1.dist-info/METADATA +0 -118
- xenfra_sdk-0.2.1.dist-info/RECORD +0 -31
xenfra_sdk/engine.py
CHANGED
@@ -1,471 +1,741 @@
# src/xenfra/engine.py

import os
import time
from datetime import datetime
from pathlib import Path
from typing import Optional, Dict

import digitalocean
import fabric
from dotenv import load_dotenv
from sqlmodel import Session, select

import shutil
import subprocess

# Xenfra modules
from . import dockerizer, recipes
from .db.models import Project
from .db.session import get_session


class DeploymentError(Exception):
    """Custom exception for deployment failures."""

    def __init__(self, message, stage="Unknown"):
        self.message = message
        self.stage = stage
        super().__init__(f"Deployment failed at stage '{stage}': {message}")


class InfraEngine:
    """
    The InfraEngine is the core of Xenfra. It handles all interactions
    with the cloud provider and orchestrates the deployment lifecycle.
    """

    def __init__(self, token: str = None, db_session: Session = None):
        """
        Initializes the engine and validates the API token.
        """
        load_dotenv()
        self.token = token or os.getenv("DIGITAL_OCEAN_TOKEN")
        self.db_session = db_session or next(get_session())

        if not self.token:
            raise ValueError(
                "DigitalOcean API token not found. Please set the DIGITAL_OCEAN_TOKEN environment variable."
            )
        try:
            self.manager = digitalocean.Manager(token=self.token)
            self.get_user_info()
        except Exception as e:
            raise ConnectionError(f"Failed to connect to DigitalOcean: {e}")

    def _get_connection(self, ip_address: str):
        """Establishes a Fabric connection to the server."""
        private_key_path = str(Path.home() / ".ssh" / "id_rsa")
        if not Path(private_key_path).exists():
            raise DeploymentError("No private SSH key found at ~/.ssh/id_rsa.", stage="Setup")

        return fabric.Connection(
            host=ip_address,
            user="root",
            connect_kwargs={"key_filename": [private_key_path]},
        )

    def get_user_info(self):
        """Retrieves user account information."""
        return self.manager.get_account()

    def list_servers(self):
        """Retrieves a list of all Droplets."""
        return self.manager.get_all_droplets()

    def destroy_server(self, droplet_id: int, db_session: Session = None):
        """
        Idempotent droplet destruction.

        Destroys the droplet and removes DB records. Handles 404 errors gracefully
        (if droplet already destroyed, continues to DB cleanup).
        """
        session = db_session or self.db_session

        # Find the project in the local DB
        statement = select(Project).where(Project.droplet_id == droplet_id)
        project_to_delete = session.exec(statement).first()

        # Destroy the droplet on DigitalOcean (handle 404 gracefully)
        try:
            droplet = digitalocean.Droplet(token=self.token, id=droplet_id)
            droplet.destroy()
        except Exception as e:
            # If 404, droplet already gone - that's OK
            error_str = str(e).lower()
            if "404" in error_str or "not found" in error_str:
                pass  # Continue to DB cleanup
            else:
                raise  # Unexpected error

        # If it was in our DB, delete it
        if project_to_delete:
            session.delete(project_to_delete)
            session.commit()

    def list_projects_from_db(self, db_session: Session = None):
        """Lists all projects from the local database."""
        session = db_session or self.db_session
        statement = select(Project)
        return session.exec(statement).all()

    def sync_with_provider(self, db_session: Session = None):
        """Reconciles the local database with the live state from DigitalOcean."""
        session = db_session or self.db_session

        # 1. Get live and local states
        live_droplets = self.manager.get_all_droplets(tag_name="xenfra")
        local_projects = self.list_projects_from_db(session)

        live_map = {d.id: d for d in live_droplets}
        local_map = {p.droplet_id: p for p in local_projects}

        # 2. Reconcile
        # Add new servers found on DO to our DB
        for droplet_id, droplet in live_map.items():
            if droplet_id not in local_map:
                new_project = Project(
                    droplet_id=droplet.id,
                    name=droplet.name,
                    ip_address=droplet.ip_address,
                    status=droplet.status,
                    region=droplet.region["slug"],
                    size=droplet.size_slug,
                )
                session.add(new_project)

        # Remove servers from our DB that no longer exist on DO
        for project_id, project in local_map.items():
            if project_id not in live_map:
                session.delete(project)

        session.commit()
        return self.list_projects_from_db(session)

    def stream_logs(self, droplet_id: int, db_session: Session = None):
        """
        Verifies a server exists and streams its logs in real-time.
        """
        session = db_session or self.db_session

        # 1. Find project in local DB
        statement = select(Project).where(Project.droplet_id == droplet_id)
        project = session.exec(statement).first()
        if not project:
            raise DeploymentError(
                f"Project with Droplet ID {droplet_id} not found in local database.",
                stage="Log Streaming",
            )

        # 2. Just-in-Time Verification
        try:
            droplet = self.manager.get_droplet(droplet_id)
        except digitalocean.baseapi.DataReadError as e:
            if e.response.status_code == 404:
                # The droplet doesn't exist, so remove it from our DB
                session.delete(project)
                session.commit()
                raise DeploymentError(
                    f"Server '{project.name}' (ID: {droplet_id}) no longer exists on DigitalOcean. It has been removed from your local list.",
                    stage="Log Streaming",
                )
            else:
                raise e

        # 3. Stream logs
        ip_address = droplet.ip_address
        with self._get_connection(ip_address) as conn:
            conn.run("cd /root/app && docker compose logs -f app", pty=True)

    def get_account_balance(self) -> dict:
        """
        Retrieves the current account balance from DigitalOcean.
        Placeholder: Actual implementation needed.
        """
        # In a real scenario, this would call the DigitalOcean API for billing info
        # For now, return mock data
        return {
            "month_to_date_balance": "0.00",
            "account_balance": "0.00",
            "month_to_date_usage": "0.00",
            "generated_at": datetime.now().isoformat(),
        }

    def get_droplet_cost_estimates(self) -> list:
        """
        Retrieves a list of Xenfra-managed DigitalOcean droplets with their estimated monthly costs.
        Placeholder: Actual implementation needed.
        """
        # In a real scenario, this would list droplets and calculate costs
        # For now, return mock data
        return []

    def _ensure_ssh_key(self, logger):
        """Ensures a local public SSH key is on DigitalOcean. Generates one if missing (Zen Mode)."""
        pub_key_path = Path.home() / ".ssh" / "id_rsa.pub"
        priv_key_path = Path.home() / ".ssh" / "id_rsa"

        if not pub_key_path.exists():
            logger(" - [Zen Mode] No SSH key found at ~/.ssh/id_rsa.pub. Generating a new one...")
            try:
                # Ensure .ssh directory exists
                pub_key_path.parent.mkdir(parents=True, exist_ok=True)

                # Generate RSA keypair without passphrase
                subprocess.run(
                    ["ssh-keygen", "-t", "rsa", "-b", "4096", "-N", "", "-f", str(priv_key_path)],
                    check=True,
                    capture_output=True
                )
                logger(" - [Zen Mode] Successfully generated SSH keypair.")
            except Exception as e:
                logger(f" - [ERROR] Failed to generate SSH key: {e}")
                raise DeploymentError(
                    f"Could not find or generate SSH key: {e}", stage="Setup"
                )

        with open(pub_key_path) as f:
            pub_key_content = f.read()

        # Check if the key is already on DigitalOcean
        existing_keys = self.manager.get_all_sshkeys()
        for key in existing_keys:
            if key.public_key.strip() == pub_key_content.strip():
                logger(" - Found existing SSH key on DigitalOcean.")
                return key

        logger(" - No matching SSH key found on provider. Registering new key...")
        # Use a descriptive name including hostname if possible
        import socket
        key_name = f"xenfra-key-{socket.gethostname()}"
        key = digitalocean.SSHKey(
            token=self.token, name=key_name, public_key=pub_key_content
        )
        key.create()
        return key

    def deploy_server(
        self,
        name: str,
        region: str = "nyc3",
        size: str = "s-1vcpu-1gb",
        image: str = "ubuntu-22-04-x64",
        logger: Optional[callable] = None,
        user_id: Optional[int] = None,
        email: Optional[str] = None,
        domain: Optional[str] = None,
        repo_url: Optional[str] = None,
        is_dockerized: bool = True,
        db_session: Session = None,
        port: int = 8000,
        command: str = None,
        entrypoint: str = None,  # e.g., "todo.main:app"
        database: str = None,
        package_manager: str = None,
        dependency_file: str = None,
        file_manifest: list = None,  # Delta upload: [{path, sha, size}, ...]
        get_file_content: callable = None,  # Function to get file content by SHA
        cleanup_on_failure: bool = False,  # Auto-cleanup resources on failure
        extra_assets: Dict[str, str] = None,  # Additional files to write (e.g. Dockerfiles)
        # Multi-service deployment (from ServiceOrchestrator)
        multi_service_compose: str = None,  # Pre-generated docker-compose.yml for multi-service
        multi_service_caddy: str = None,  # Pre-generated Caddyfile for multi-service routing
        services: list = None,  # List of ServiceDefinition for multi-service deployments
        **kwargs,
    ):
        """A stateful, blocking orchestrator for deploying a new server."""
        droplet = None
        session = db_session or self.db_session
        branch = kwargs.get("branch", "main")  # Extract branch from kwargs
        framework = kwargs.get("framework")  # Extract framework from kwargs

        try:
            # === 0. MICROSERVICES DELEGATION ===
            # If services are provided but no pre-generated assets, delegate to Orchestrator
            if services and not (multi_service_compose or multi_service_caddy):
                logger("\n[bold magenta]MICROSERVICES DETECTED - Delegating to ServiceOrchestrator[/bold magenta]")
                from .orchestrator import ServiceOrchestrator, load_services_from_xenfra_yaml
                from .manifest import create_services_from_detected

                # Convert dicts to ServiceDefinition objects if needed
                service_objs = []
                if services and isinstance(services[0], dict):
                    service_objs = create_services_from_detected(services)
                else:
                    service_objs = services

                # Determine mode (can be passed in kwargs or default to single-droplet)
                mode = kwargs.get("mode", "single-droplet")

                orchestrator = ServiceOrchestrator(
                    engine=self,
                    services=service_objs,
                    project_name=name,
                    mode=mode,
                    file_manifest=file_manifest
                )

                return orchestrator.deploy(
                    logger=logger,
                    # Pass all original arguments to ensure they propagate
                    region=region,
                    size=size,
                    image=image,
                    user_id=user_id,
                    email=email,
                    domain=domain,
                    repo_url=repo_url,
                    is_dockerized=is_dockerized,
                    db_session=db_session,
                    port=port,
                    command=command,
                    entrypoint=entrypoint,
                    database=database,
                    package_manager=package_manager,
                    dependency_file=dependency_file,
                    file_manifest=file_manifest,
                    get_file_content=get_file_content,
                    cleanup_on_failure=cleanup_on_failure,
                    extra_assets=extra_assets,
                    **kwargs
                )

            # === 0. EARLY VALIDATION ===
            # Check code source BEFORE creating droplet
            has_code_source = repo_url or (file_manifest and get_file_content)
            if os.getenv("XENFRA_SERVICE_MODE") == "true" and not has_code_source:
                raise DeploymentError(
                    "No code source provided. Use git_repo URL or upload files first. "
                    "Local folder deployment is not supported via the cloud API.",
                    stage="Validation",
                )

            # === 1. SETUP STAGE ===
            logger("\n[bold blue]PHASE 1: SETUP[/bold blue]")
            ssh_key = self._ensure_ssh_key(logger)

            # === 2. ASSET GENERATION STAGE ===
            logger("\n[bold blue]PHASE 2: GENERATING DEPLOYMENT ASSETS[/bold blue]")

            # Detect Python version from project files if using delta upload
            python_version = "python:3.11-slim"  # Default
            if file_manifest and get_file_content:
                # Build file info with content for version detection
                version_files = []
                for finfo in file_manifest:
                    path = finfo.get('path', '')
                    if path in ['.python-version', 'pyproject.toml']:
                        content = get_file_content(finfo.get('sha', ''))
                        if content:
                            version_files.append({
                                'path': path,
                                'content': content.decode('utf-8', errors='ignore')
                            })

                if version_files:
                    python_version = dockerizer.detect_python_version(version_files)
                    logger(f" - Detected Python version: {python_version}")

            context = {
                "email": email,
                "domain": domain,
                "repo_url": repo_url,
                "port": port or 8000,
                "command": command,
                "entrypoint": entrypoint,  # Pass entrypoint to templates (e.g., "todo.main:app")
                "database": database,
                "package_manager": package_manager or "pip",
                "dependency_file": dependency_file or "requirements.txt",
                "framework": framework,  # Explicitly include framework
                "python_version": python_version,  # Auto-detected or default
                **kwargs,  # Pass any additional config
            }

            # Check if this is a multi-service deployment
            if multi_service_compose:
                # Use pre-generated assets from ServiceOrchestrator
                logger(" - Using multi-service configuration")
                rendered_assets = {
                    "docker-compose.yml": multi_service_compose,
                }
                if multi_service_caddy:
                    rendered_assets["Caddyfile"] = multi_service_caddy
                    logger(f" - Caddyfile for {len(services) if services else 0} services")
            else:
                # Render templates to strings (NOT written to disk) - single service
                rendered_assets = dockerizer.render_deployment_assets(context)
                if not rendered_assets:
                    raise DeploymentError("Failed to render deployment assets. Is framework specified?", stage="Asset Generation")

            # Merge extra assets (like service-specific Dockerfiles)
            if extra_assets:
                rendered_assets.update(extra_assets)
                logger(f" - Included {len(extra_assets)} extra assets")

            for filename in rendered_assets:
                logger(f" - Rendered {filename} ({len(rendered_assets[filename])} bytes)")

            # === 3. CLOUD-INIT STAGE ===
            logger("\n[bold blue]PHASE 3: CREATING SERVER SETUP SCRIPT[/bold blue]")
            cloud_init_script = recipes.generate_stack(context, is_dockerized=is_dockerized)
            logger(" - Generated cloud-init script.")
            logger(
                f"--- Cloud-init script content ---\n{cloud_init_script}\n---------------------------------"
            )

            # === 4. DROPLET CREATION STAGE ===
            logger("\n[bold blue]PHASE 4: PROVISIONING SERVER[/bold blue]")

            # Machine Reuse: Look for existing droplet with same name and 'xenfra' tag
            existing_droplets = digitalocean.Manager(token=self.token).get_all_droplets(tag_name="xenfra")
            droplet = next((d for d in existing_droplets if d.name == name), None)

            if droplet and droplet.status == "active":
                logger(f" - Found existing active droplet '{name}' (ID: {droplet.id}). Reusing machine...")
            else:
                if droplet:
                    logger(f" - Found existing droplet '{name}' but it's not active ({droplet.status}). Creating new one...")

                droplet = digitalocean.Droplet(
                    token=self.token,
                    name=name,
                    region=region,
                    image=image,
                    size_slug=size,
                    ssh_keys=[ssh_key.id],
                    user_data=cloud_init_script,
                    tags=["xenfra"],
                    private_networking=True,
                )
                droplet.create()
                logger(
                    f" - Droplet '{name}' creation initiated (ID: {droplet.id}). Waiting for it to become active..."
                )

            # === 5. POLLING STAGE ===
            logger("\n[bold blue]PHASE 5: WAITING FOR SERVER SETUP[/bold blue]")
            while True:
                droplet.load()
                if droplet.status == "active":
                    logger(" - Droplet is active. Waiting for SSH to be available...")
                    break
                time.sleep(10)

            ip_address = droplet.ip_address

            # Retry SSH connection
            conn = None
            max_retries = 12  # 2-minute timeout for SSH
            for i in range(max_retries):
                try:
                    logger(f" - Attempting SSH connection ({i + 1}/{max_retries})...")
                    conn = self._get_connection(ip_address)
                    conn.open()  # Explicitly open the connection
                    logger(" - SSH connection established.")
                    break
                except Exception as e:
                    if i < max_retries - 1:
                        logger(" - SSH connection failed. Retrying in 10s...")
                        time.sleep(10)
                    else:
                        raise DeploymentError(
                            f"Failed to establish SSH connection: {e}", stage="Polling"
                        )

            if not conn or not conn.is_connected:
                raise DeploymentError("Could not establish SSH connection.", stage="Polling")

            logger(" - [DEBUG] Entering SSH context for Phase 5 polling...")
            with conn:
                last_log_line = 0
                logger(" - Polling server setup log (/root/setup.log)...")
                for i in range(120):  # 20-minute timeout
                    # Heartbeat
                    if i % 3 == 0:  # Every 30 seconds
                        logger(f" - Phase 5 Heartbeat: Waiting for setup completion ({i+1}/120)...")

                    # Check for completion with timeout
                    try:
                        check_result = conn.run("test -f /root/setup_complete", warn=True, hide=True, timeout=10)
                        if check_result.ok:
                            logger(" - Cloud-init setup complete.")
                            break
                    except Exception as e:
                        logger(f" - [Warning] Status check failed: {e}. Retrying...")

                    # Tail the setup log for visibility
                    try:
                        log_result = conn.run(f"tail -n +{last_log_line + 1} /root/setup.log 2>/dev/null", warn=True, hide=True, timeout=10)
                        if log_result.ok and log_result.stdout.strip():
                            new_lines = log_result.stdout.strip().split("\n")
                            for line in new_lines:
                                if line.strip():
                                    logger(f" [Server Setup] {line.strip()}")
                            last_log_line += len(new_lines)
                    except Exception as e:
                        # Log doesn't exist yet or tail failed
                        pass

                    time.sleep(10)
                else:
                    raise DeploymentError(
                        "Server setup script failed to complete in time.", stage="Polling"
                    )

            # === 6. CODE UPLOAD STAGE ===
            logger("\n[bold blue]PHASE 6: UPLOADING APPLICATION CODE[/bold blue]")
            with self._get_connection(ip_address) as conn:
                # Option 1: Git clone (if repo_url provided)
                if repo_url:
                    logger(f" - Cloning repository from {repo_url} (branch: {branch})...")
                    # Use --branch to checkout specific branch, --single-branch for efficiency
                    clone_cmd = f"git clone --branch {branch} --single-branch {repo_url} /root/app"
                    result = conn.run(clone_cmd, warn=True, hide=True)
                    if result.failed:
                        # Try without --single-branch in case branch doesn't exist
                        # Clean up any partial clone first
                        logger(f" - Branch '{branch}' clone failed, trying default branch...")
                        conn.run("rm -rf /root/app", warn=True, hide=True)
                        conn.run(f"git clone {repo_url} /root/app")

                # Option 2: Delta upload (if file_manifest provided)
                elif file_manifest and get_file_content:
                    logger(f" - Syncing {len(file_manifest)} files via delta upload...")

                    # Ensure /root/app exists
                    conn.run("mkdir -p /root/app", hide=True)

                    for i, file_info in enumerate(file_manifest):
                        path = file_info['path']
                        sha = file_info['sha']
                        size = file_info.get('size', 0)

                        # Build Safety: Placeholder for 0-byte critical files
                        # (Hatchling/Pip fail if README.md or __init__.py are mentioned but empty)
                        is_critical_empty = (
                            size == 0 and
                            (path.lower() == 'readme.md' or path.endswith('__init__.py'))
                        )

                        # Smart Incremental Sync: Check if file exists and has same SHA
                        remote_path = f"/root/app/{path}"
                        check_sha_cmd = f"sha256sum {remote_path}"
                        result = conn.run(check_sha_cmd, warn=True, hide=True)

                        if result.ok:
                            remote_sha = result.stdout.split()[0]
                            if remote_sha == sha and not is_critical_empty:
                                # File already exists and matches, skip upload
                                continue

                        # Get file content from storage
                        content = get_file_content(sha)
                        if content is None:
                            raise DeploymentError(f"File not found in storage: {path} (sha: {sha})", stage="Code Upload")

                        # Apply placeholder if critical and empty
                        if is_critical_empty:
                            content = b"# xenfra placeholder\n"
                            logger(f" - [Zen Mode] Injected placeholder into empty {path}")

                        # Create directory if needed
                        dir_path = os.path.dirname(path)
                        if dir_path:
                            conn.run(f"mkdir -p /root/app/{dir_path}", warn=True, hide=True)

                        # Use SFTP for file transfer (handles large files)
                        from io import BytesIO
                        conn.put(BytesIO(content), remote_path)

                        # Progress update every 10 files
                        if (i + 1) % 10 == 0 or i == len(file_manifest) - 1:
                            logger(f" - Synced {i + 1}/{len(file_manifest)} files...")

                    logger(f" - All {len(file_manifest)} files synced.")

                # Option 3: Local rsync (only works locally, not in service mode)
                else:
                    # Note: Early validation in Phase 0 should have caught this for service mode
                    private_key_path = str(Path.home() / ".ssh" / "id_rsa")
                    rsync_cmd = f'rsync -avz --exclude=".git" --exclude=".venv" --exclude="__pycache__" -e "ssh -i {private_key_path} -o StrictHostKeyChecking=no" . root@{ip_address}:/root/app/'
                    logger(f" - Uploading local code via rsync...")
                    result = subprocess.run(rsync_cmd, shell=True, capture_output=True, text=True)
                    if result.returncode != 0:
                        raise DeploymentError(f"rsync failed: {result.stderr}", stage="Code Upload")
                logger(" - Code upload complete.")

            # === 6.5. WRITE DEPLOYMENT ASSETS TO DROPLET ===
            logger("\n[bold blue]PHASE 6.5: WRITING DEPLOYMENT ASSETS[/bold blue]")
            with self._get_connection(ip_address) as conn:
                for filename, content in rendered_assets.items():
                    logger(f" - Writing {filename}...")
                    try:
                        # Use base64 encoding to safely transfer file content
                        # Use printf to avoid issues with special characters
                        import base64
                        encoded_content = base64.b64encode(content.encode()).decode()
                        # Use printf with %s to handle any special characters in base64
                        conn.run(f"printf '%s' '{encoded_content}' | base64 -d > /root/app/{filename}")
                    except Exception as e:
                        raise DeploymentError(f"Failed to write {filename}: {e}", stage="Asset Write")
            logger(" - Deployment assets written.")

            # === 7. FINAL DEPLOY STAGE ===
            if is_dockerized:
                logger("\n[bold blue]PHASE 7: BUILDING AND DEPLOYING CONTAINERS[/bold blue]")
                with self._get_connection(ip_address) as conn:
                    # Force --no-cache to ensure updated files (like README.md placeholders) are used
                    result = conn.run("cd /root/app && docker compose build --no-cache && docker compose up -d", hide=True)
                    if result.failed:
                        raise DeploymentError(f"docker-compose failed: {result.stderr}", stage="Deploy")
                logger(" - Docker build complete, containers starting...")
            else:
                logger("\n[bold blue]PHASE 7: STARTING HOST-BASED APPLICATION[/bold blue]")
                start_command = context.get("command", f"uvicorn main:app --port {context.get('port', 8000)}")
                with self._get_connection(ip_address) as conn:
                    result = conn.run(f"cd /root/app && python3 -m venv .venv && .venv/bin/pip install -r requirements.txt && nohup .venv/bin/{start_command} > app.log 2>&1 &", hide=True)
                    if result.failed:
                        raise DeploymentError(f"Host-based start failed: {result.stderr}", stage="Deploy")
                logger(f" - Application started via: {start_command}")

            # Multi-service: Configure Caddy for path-based routing (Gateway or Single-Droplet)
            if multi_service_caddy:
                logger(" - Configuring Caddy for multi-service routing...")
                with self._get_connection(ip_address) as conn:
                    # Write Caddyfile to Caddy's config directory
                    import base64
                    encoded_caddy = base64.b64encode(multi_service_caddy.encode()).decode()
                    conn.run(f"printf '%s' '{encoded_caddy}' | base64 -d > /etc/caddy/Caddyfile", warn=True)
                    # Reload Caddy to pick up new config
                    conn.run("systemctl reload caddy || systemctl restart caddy", warn=True)
                logger(" - Caddy configured for path-based routing")

            # === 8. VERIFICATION STAGE ===
            logger("\n[bold blue]PHASE 8: VERIFYING DEPLOYMENT[/bold blue]")

            # Give container a moment to initialize before first health check
            time.sleep(5)

            app_port = context.get("port", 8000)
            for i in range(24):  # 2-minute timeout for health checks
                logger(f" - Health check attempt {i + 1}/24...")
                with self._get_connection(ip_address) as conn:
                    # Check if running
                    if is_dockerized:
                        ps_result = conn.run("cd /root/app && docker compose ps", hide=True)
                        ps_output = ps_result.stdout.lower()
                        # Docker Compose V1 shows "running", V2 shows "Up" in status
                        running = "running" in ps_output or " up " in ps_output
                        if "restarting" in ps_output:
                            logs = conn.run("cd /root/app && docker compose logs --tail 20", hide=True).stdout
                            raise DeploymentError(f"Application is crash-looping (restarting). Logs:\n{logs}", stage="Verification")
                    else:
                        ps_result = conn.run("ps aux | grep -v grep | grep python", hide=True)
                        running = ps_result.ok and len(ps_result.stdout.strip()) > 0

                    if not running:
                        time.sleep(5)
                        continue

                    # Check if application is responsive (port is listening)
                    # Accept ANY HTTP response (including 404) - it means the app is running
                    # Use curl with -w to get HTTP code, accept any response >= 100
                    port_check = conn.run(
                        f"curl -s -o /dev/null -w '%{{http_code}}' --connect-timeout 3 http://localhost:{app_port}/",
                        warn=True, hide=True
                    )
                    # curl may exit non-zero for 404, but still outputs HTTP code
                    http_code = port_check.stdout.strip()

                    # Any HTTP response (200, 404, 500, etc.) means app is running
                    if http_code.isdigit() and int(http_code) >= 100:
                        logger(
                            "[bold green] - Health check passed! Application is live.[/bold green]"
                        )

                        # === 9. PERSISTENCE STAGE ===
                        logger("\n[bold blue]PHASE 9: SAVING DEPLOYMENT TO DATABASE[/bold blue]")
                        project = Project(
                            droplet_id=droplet.id,
                            name=droplet.name,
                            ip_address=ip_address,
                            status=droplet.status,
                            region=droplet.region["slug"],
                            size=droplet.size_slug,
                            user_id=user_id,  # Save the user_id
                        )
                        session.add(project)
                        session.commit()
                        logger(" - Deployment saved.")

                        return droplet  # Return the full droplet object
                time.sleep(5)
            else:
                # Capture logs on timeout failure
                with self._get_connection(ip_address) as conn:
                    logs = conn.run("cd /root/app && docker compose logs --tail 50", hide=True, warn=True).stdout if is_dockerized else ""
                raise DeploymentError(f"Application failed to become healthy in time. Logs:\n{logs}", stage="Verification")

        except Exception as e:
            if droplet:
                if cleanup_on_failure:
                    logger("[bold yellow]Cleaning up resources...[/bold yellow]")
                    try:
                        # 1. Destroy droplet (DigitalOcean API)
                        logger(f" - Destroying droplet '{droplet.name}'...")
                        droplet.destroy()
                        logger(" - Droplet destroyed.")

                        # 2. Remove from database
                        if session:
                            statement = select(Project).where(Project.droplet_id == droplet.id)
                            project_to_delete = session.exec(statement).first()
                            if project_to_delete:
                                session.delete(project_to_delete)
                                session.commit()
                                logger(" - Database record removed.")

                        logger("[bold green]Cleanup completed.[/bold green]")
                    except Exception as cleanup_error:
                        logger(f"[bold red]Cleanup failed: {cleanup_error}[/bold red]")
                        logger("[yellow]You may need to manually delete from DigitalOcean.[/yellow]")
                else:
                    logger(
                        f"[bold red]Deployment failed. Server '{droplet.name}' NOT cleaned up.[/bold red]"
                    )
                    logger("[dim]Tip: Use --cleanup-on-failure to auto-cleanup.[/dim]")
            raise e
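
For orientation, a minimal sketch of how the deploy_server API shown above might be driven. It is illustrative only, based solely on the signature in this diff: the token, repository URL, and droplet name are hypothetical placeholders, and framework is simply forwarded through **kwargs into the template context as the code above does.

# Minimal usage sketch (not part of the package); all values below are placeholders.
from xenfra_sdk.engine import InfraEngine

engine = InfraEngine(token="YOUR_DIGITALOCEAN_TOKEN")  # or rely on the DIGITAL_OCEAN_TOKEN env var

droplet = engine.deploy_server(
    name="demo-api",                                  # hypothetical droplet name
    region="nyc3",
    size="s-1vcpu-1gb",
    repo_url="https://github.com/example/demo-api",   # hypothetical repo; alternatively pass file_manifest + get_file_content
    framework="fastapi",                              # forwarded via **kwargs into the asset-rendering context
    port=8000,
    cleanup_on_failure=True,                          # destroy the droplet and DB record if any phase fails
    logger=print,                                     # any callable accepting a string works as the logger
)
print(f"Deployed to {droplet.ip_address}")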