xenfra 0.3.6__py3-none-any.whl → 0.3.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra/commands/deployments.py +252 -64
- xenfra/commands/intelligence.py +18 -0
- xenfra/utils/config.py +28 -26
- {xenfra-0.3.6.dist-info → xenfra-0.3.8.dist-info}/METADATA +1 -1
- {xenfra-0.3.6.dist-info → xenfra-0.3.8.dist-info}/RECORD +7 -7
- {xenfra-0.3.6.dist-info → xenfra-0.3.8.dist-info}/WHEEL +0 -0
- {xenfra-0.3.6.dist-info → xenfra-0.3.8.dist-info}/entry_points.txt +0 -0
xenfra/commands/deployments.py
CHANGED

@@ -14,7 +14,7 @@ from xenfra_sdk.privacy import scrub_logs

 from ..utils.auth import API_BASE_URL, get_auth_token
 from ..utils.codebase import has_xenfra_config
-from ..utils.config import apply_patch
+from ..utils.config import apply_patch, read_xenfra_yaml
 from ..utils.validation import (
     validate_branch_name,
     validate_deployment_id,
@@ -23,6 +23,12 @@ from ..utils.validation import (
     validate_project_name,
 )

+import time
+from datetime import datetime
+
+from rich.live import Live
+from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn
+
 console = Console()

 # Maximum number of retry attempts for auto-healing
@@ -59,6 +65,186 @@ def show_patch_preview(patch_data: dict):
     console.print()


+def _stream_deployment(client: XenfraClient, project_name: str, git_repo: str, branch: str, framework: str, region: str, size: str):
+    """
+    Creates deployment with real-time SSE streaming (no polling needed).
+
+    Returns tuple of (status, deployment_id, logs_buffer)
+    """
+    console.print(Panel(
+        f"[bold cyan]Project:[/bold cyan] {project_name}\n"
+        f"[bold cyan]Mode:[/bold cyan] Real-time Streaming Deployment",
+        title="[bold green]🚀 Deployment Starting[/bold green]",
+        border_style="green"
+    ))
+
+    deployment_id = None
+    logs_buffer = []
+    status_val = "PENDING"
+
+    try:
+        for event in client.deployments.create_stream(
+            project_name=project_name,
+            git_repo=git_repo,
+            branch=branch,
+            framework=framework,
+            region=region,
+            size_slug=size,
+        ):
+            event_type = event.get("event", "message")
+            data = event.get("data", "")
+
+            if event_type == "deployment_created":
+                # Extract deployment ID
+                if isinstance(data, dict):
+                    deployment_id = data.get("deployment_id")
+                    console.print(f"[bold green]✓[/bold green] Deployment created: [cyan]{deployment_id}[/cyan]\n")
+
+            elif event_type == "log":
+                # Real-time log output
+                log_line = str(data)
+                logs_buffer.append(log_line)
+
+                # Colorize output
+                if any(x in log_line for x in ["ERROR", "FAILED", "✗"]):
+                    console.print(f"[bold red]{log_line}[/bold red]")
+                elif any(x in log_line for x in ["WARN", "WARNING", "⚠"]):
+                    console.print(f"[yellow]{log_line}[/yellow]")
+                elif any(x in log_line for x in ["SUCCESS", "COMPLETED", "✓", "passed!"]):
+                    console.print(f"[bold green]{log_line}[/bold green]")
+                elif "PHASE" in log_line:
+                    console.print(f"\n[bold blue]{log_line}[/bold blue]")
+                elif "[InfraEngine]" in log_line or "[INFO]" in log_line:
+                    console.print(f"[cyan]›[/cyan] {log_line}")
+                else:
+                    console.print(f"[dim]{log_line}[/dim]")
+
+            elif event_type == "error":
+                error_msg = str(data)
+                logs_buffer.append(f"ERROR: {error_msg}")
+                console.print(f"\n[bold red]❌ Error: {error_msg}[/bold red]")
+                status_val = "FAILED"
+
+            elif event_type == "deployment_complete":
+                # Final status
+                if isinstance(data, dict):
+                    status_val = data.get("status", "UNKNOWN")
+                    ip_address = data.get("ip_address")
+
+                    console.print()
+                    if status_val == "SUCCESS":
+                        console.print("[bold green]✨ SUCCESS: Your application is live![/bold green]")
+                        if ip_address and ip_address != "unknown":
+                            console.print(f"[bold]Accessible at:[/bold] [link=http://{ip_address}]http://{ip_address}[/link]")
+                    elif status_val == "FAILED":
+                        console.print("[bold red]❌ DEPLOYMENT FAILED[/bold red]")
+                        error = data.get("error")
+                        if error:
+                            console.print(f"[red]Error: {error}[/red]")
+                break
+
+    except Exception as e:
+        console.print(f"\n[bold red]❌ Streaming error: {e}[/bold red]")
+        status_val = "FAILED"
+        logs_buffer.append(f"Streaming error: {e}")
+
+    return (status_val, deployment_id, "\n".join(logs_buffer))
+
+
+def _follow_deployment(client: XenfraClient, deployment_id: str):
+    """
+    Polls logs and status in real-time until completion with CI/CD style output.
+    (LEGACY - Used for backward compatibility)
+    """
+    console.print(Panel(
+        f"[bold cyan]Deployment ID:[/bold cyan] {deployment_id}\n"
+        f"[bold cyan]Mode:[/bold cyan] Streaming Real-time Infrastructure Logs",
+        title="[bold green]🚀 Deployment Monitor[/bold green]",
+        border_style="green"
+    ))
+
+    last_log_len = 0
+    status_val = "PENDING"
+
+    # Use a live display for the progress bar at the bottom
+    with Progress(
+        SpinnerColumn(),
+        TextColumn("[bold blue]{task.description}"),
+        BarColumn(bar_width=40),
+        TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+        console=console,
+        transient=False,
+    ) as progress:
+        task = progress.add_task("Waiting for server response...", total=100)
+
+        while status_val not in ["SUCCESS", "FAILED", "CANCELLED"]:
+            try:
+                # 1. Update Status
+                dep_status = client.deployments.get_status(deployment_id)
+                status_val = dep_status.get("status", "PENDING")
+                progress_val = dep_status.get("progress", 0)
+                state = dep_status.get("state", "preparing")
+
+                # Use a more descriptive description for the progress task
+                desc = f"Phase: {state}"
+                if status_val == "FAILED":
+                    desc = "[bold red]FAILED[/bold red]"
+                elif status_val == "SUCCESS":
+                    desc = "[bold green]SUCCESS[/bold green]"
+
+                progress.update(task, completed=progress_val, description=desc)
+
+                # 2. Update Logs
+                log_content = client.deployments.get_logs(deployment_id)
+                if log_content and len(log_content) > last_log_len:
+                    new_logs = log_content[last_log_len:].strip()
+                    for line in new_logs.split("\n"):
+                        # Process and colorize lines
+                        clean_line = line.strip()
+                        if not clean_line:
+                            continue
+
+                        if any(x in clean_line for x in ["ERROR", "FAILED", "✗"]):
+                            progress.console.print(f"[bold red]{clean_line}[/bold red]")
+                        elif any(x in clean_line for x in ["WARN", "WARNING", "⚠"]):
+                            progress.console.print(f"[yellow]{clean_line}[/yellow]")
+                        elif any(x in clean_line for x in ["SUCCESS", "COMPLETED", "✓", "passed!"]):
+                            progress.console.print(f"[bold green]{clean_line}[/bold green]")
+                        elif "PHASE" in clean_line:
+                            progress.console.print(f"\n[bold blue]{clean_line}[/bold blue]")
+                        elif "[InfraEngine]" in clean_line:
+                            progress.console.print(f"[dim]{clean_line}[/dim]")
+                        else:
+                            progress.console.print(f"[cyan]›[/cyan] {clean_line}")
+
+                    last_log_len = len(log_content)
+
+                if status_val in ["SUCCESS", "FAILED", "CANCELLED"]:
+                    break
+
+                time.sleep(1.5)  # Slightly faster polling for better feel
+            except Exception as e:
+                # progress.console.print(f"[dim]Transient connection issue: {e}[/dim]")
+                time.sleep(3)
+                continue
+
+    console.print()
+    if status_val == "SUCCESS":
+        console.print("[bold green]✨ SUCCESS: Your application is live![/bold green]")
+        # Try to get the IP address
+        try:
+            final_status = client.deployments.get_status(deployment_id)
+            ip = final_status.get("ip_address")
+            if ip:
+                console.print(f"[bold]Accessible at:[/bold] [link=http://{ip}]http://{ip}[/link]")
+        except:
+            pass
+    elif status_val == "FAILED":
+        console.print("\n[bold red]❌ FAILURE DETECTED: Entering AI Diagnosis Mode...[/bold red]")
+
+    return status_val
+
 def zen_nod_workflow(logs: str, client: XenfraClient, attempt: int) -> bool:
     """
     Execute the Zen Nod auto-healing workflow.
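The new `_stream_deployment` helper dispatches on an `event` field and reads a few keys off `data`. The shapes below are only inferred from the `.get()` calls visible in the hunk above; they are illustrative, not the API contract, and the concrete values are invented for the sketch:

    # Event shapes inferred from the .get() calls in _stream_deployment above.
    # All concrete values (IDs, log text, IP address) are made up for illustration.
    example_events = [
        {"event": "deployment_created", "data": {"deployment_id": "dep_123"}},
        {"event": "log", "data": "[InfraEngine] PHASE 2: Provisioning droplet"},
        {"event": "error", "data": "droplet quota exceeded"},
        {"event": "deployment_complete",
         "data": {"status": "SUCCESS", "ip_address": "203.0.113.10", "error": None}},
    ]

    for event in example_events:
        event_type = event.get("event", "message")  # same dispatch key the CLI uses
        data = event.get("data", "")
        print(event_type, "->", data)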
@@ -74,6 +260,12 @@ def zen_nod_workflow(logs: str, client: XenfraClient, attempt: int) -> bool:
     console.print()
     console.print(f"[cyan]🤖 Analyzing failure (attempt {attempt}/{MAX_RETRY_ATTEMPTS})...[/cyan]")

+    # Slice logs to last 300 lines for focused diagnosis (Fix #26)
+    log_lines = logs.split("\n")
+    if len(log_lines) > 300:
+        logs = "\n".join(log_lines[-300:])
+        console.print("[dim]Note: Analyzing only the last 300 lines of logs for efficiency.[/dim]")
+
     # Scrub sensitive data from logs
     scrubbed_logs = scrub_logs(logs)

@@ -120,8 +312,10 @@ def zen_nod_workflow(logs: str, client: XenfraClient, attempt: int) -> bool:
 @click.option("--git-repo", help="Git repository URL (if deploying from git)")
 @click.option("--branch", default="main", help="Git branch (default: main)")
 @click.option("--framework", help="Framework override (fastapi, flask, django)")
+@click.option("--region", help="DigitalOcean region override")
+@click.option("--size", help="DigitalOcean size slug override")
 @click.option("--no-heal", is_flag=True, help="Disable auto-healing on failure")
-def deploy(project_name, git_repo, branch, framework, no_heal):
+def deploy(project_name, git_repo, branch, framework, region, size, no_heal):
     """
     Deploy current project to DigitalOcean with auto-healing.

@@ -150,6 +344,35 @@ def deploy(project_name, git_repo, branch, framework, no_heal):
         console.print("[dim]Deployment cancelled.[/dim]")
         return

+    # Load configuration from xenfra.yaml if it exists
+    config = {}
+    if has_xenfra_config():
+        try:
+            config = read_xenfra_yaml()
+        except Exception as e:
+            console.print(f"[yellow]Warning: Could not read xenfra.yaml: {e}[/dim]")
+
+    # Resolve values with precedence: 1. CLI Flag, 2. xenfra.yaml, 3. Default
+    project_name = project_name or config.get("name") or os.path.basename(os.getcwd())
+    framework = framework or config.get("framework")
+    region = region or config.get("region") or "nyc3"
+
+    # Resolve size slug (complex mapping)
+    if not size:
+        if config.get("size"):
+            size = config.get("size")
+        else:
+            instance_size = config.get("instance_size", "basic")
+            resources = config.get("resources", {})
+            cpu = resources.get("cpu", 1)
+
+            if instance_size == "standard" or cpu >= 2:
+                size = "s-2vcpu-4gb"
+            elif instance_size == "premium" or cpu >= 4:
+                size = "s-4vcpu-8gb"
+            else:
+                size = "s-1vcpu-1gb"
+
     # Default project name to current directory
     if not project_name:
         project_name = os.path.basename(os.getcwd())
@@ -211,67 +434,34 @@ def deploy(project_name, git_repo, branch, framework, no_heal):
            console.print("[dim]Auto-detecting framework...[/dim]")
            framework = "fastapi"  # Default for now

-        # Create deployment
+        # Create deployment with real-time streaming
        try:
-
-
-
-
-
-
-
-
-            ) as progress:
-                # Track deployment phases
-                task = progress.add_task("Creating deployment...", total=100)
-
-                deployment = client.deployments.create(
-                    project_name=project_name,
-                    git_repo=git_repo,
-                    branch=branch,
-                    framework=framework,
-                )
-
-                progress.update(task, advance=100, description="Deployment created!")
-
-            deployment_id = deployment["deployment_id"]
-            console.print(
-                f"[bold green]✓[/bold green] Deployment created: [cyan]{deployment_id}[/cyan]"
+            status_result, deployment_id, logs_data = _stream_deployment(
+                client=client,
+                project_name=project_name,
+                git_repo=git_repo,
+                branch=branch,
+                framework=framework,
+                region=region,
+                size=size,
            )

-
-
-
-            details_table.add_column("Value", style="white")
+            if status_result == "FAILED" and not no_heal:
+                # Hand off to the Zen Nod AI Agent
+                should_retry = zen_nod_workflow(logs_data, client, attempt)

-
-
-
-
-
+                if should_retry:
+                    # The agent applied a fix, loop back for attempt + 1
+                    continue
+                else:
+                    # Agent couldn't fix it or user declined
+                    raise click.Abort()
+
+            # If we got here with success, break the retry loop
+            if status_result == "SUCCESS":
+                break
            else:
-
-                details_table.add_row("Framework", framework)
-                details_table.add_row("Status", deployment.get("status", "PENDING"))
-
-                panel = Panel(
-                    details_table,
-                    title="[bold green]Deployment Started[/bold green]",
-                    border_style="green",
-                )
-                console.print(panel)
-
-                # Show next steps
-                console.print("\n[bold]Next steps:[/bold]")
-                console.print(f" • Monitor status: [cyan]xenfra status {deployment_id}[/cyan]")
-                console.print(f" • View logs: [cyan]xenfra logs {deployment_id}[/cyan]")
-                if not no_heal:
-                    console.print(
-                        f" • Diagnose issues: [cyan]xenfra diagnose {deployment_id}[/cyan]"
-                    )
-
-                # Success - break out of retry loop
-                break
+                raise click.Abort()

        except XenfraAPIError as e:
            # Deployment failed - try to provide helpful error
@@ -373,9 +563,8 @@ def logs(deployment_id, follow, tail):
     console.print(f"\n[bold]Logs for deployment {deployment_id}:[/bold]\n")

     if follow:
-
-
-        )
+        _follow_deployment(client, deployment_id)
+        return

     # Display logs
     for line in log_lines:
@@ -427,9 +616,8 @@ def status(deployment_id, watch):
     deployment_status = client.deployments.get_status(deployment_id)

     if watch:
-
-
-        )
+        _follow_deployment(client, deployment_id)
+        return

     # Display status
     status_value = deployment_status.get("status", "UNKNOWN")
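Taken together, these hunks mean `deploy` now resolves its droplet parameters in three steps: explicit CLI flag, then `xenfra.yaml`, then a built-in default. A standalone sketch of the size resolution, assuming the hypothetical helper name `resolve_droplet_size` (the slugs and thresholds are the ones in the hunk above):

    from typing import Optional

    # Hypothetical helper mirroring the size resolution logic added to deploy().
    def resolve_droplet_size(cli_size: Optional[str], config: dict) -> str:
        if cli_size:                                 # 1. explicit --size flag wins
            return cli_size
        if config.get("size"):                       # 2. explicit slug in xenfra.yaml
            return config["size"]
        instance_size = config.get("instance_size", "basic")
        cpu = config.get("resources", {}).get("cpu", 1)
        if instance_size == "standard" or cpu >= 2:  # 3. derive from instance_size/resources
            return "s-2vcpu-4gb"
        if instance_size == "premium" or cpu >= 4:
            return "s-4vcpu-8gb"
        return "s-1vcpu-1gb"

    print(resolve_droplet_size(None, {"instance_size": "premium"}))  # s-4vcpu-8gb
    print(resolve_droplet_size(None, {"resources": {"cpu": 4}}))     # s-2vcpu-4gb (cpu >= 2 matches first)

Note that, as written in the diff, a config that only asks for 4 CPUs without `instance_size: premium` hits the `cpu >= 2` branch first and gets the 2-vCPU slug; only the `--size` flag or an explicit `size` slug in `xenfra.yaml` overrides that.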
xenfra/commands/intelligence.py
CHANGED

@@ -190,7 +190,16 @@ def init(manual, accept_all):
     table.add_row("Workers", ", ".join(analysis.workers))
     table.add_row("Package Manager", selected_package_manager)
     table.add_row("Dependency File", selected_dependency_file)
+
+    # New: Infrastructure details in summary
+    table.add_row("Region", "nyc3 (default)")
     table.add_row("Instance Size", analysis.instance_size)
+
+    # Resource visualization
+    cpu = 1 if analysis.instance_size == "basic" else (2 if analysis.instance_size == "standard" else 4)
+    ram = "1GB" if analysis.instance_size == "basic" else ("4GB" if analysis.instance_size == "standard" else "8GB")
+    table.add_row("Resources", f"{cpu} vCPU, {ram} RAM")
+
     table.add_row("Estimated Cost", f"${analysis.estimated_cost_monthly:.2f}/month")
     table.add_row("Confidence", f"{analysis.confidence:.0%}")

@@ -371,7 +380,16 @@ def analyze():
     table.add_row("Workers", ", ".join(analysis.workers))
     if analysis.env_vars:
         table.add_row("Environment Variables", ", ".join(analysis.env_vars))
+
+    # New: Infrastructure details in preview
+    table.add_row("Region", "nyc3 (default)")
     table.add_row("Instance Size", analysis.instance_size)
+
+    # Resource visualization
+    cpu = 1 if analysis.instance_size == "basic" else (2 if analysis.instance_size == "standard" else 4)
+    ram = "1GB" if analysis.instance_size == "basic" else ("4GB" if analysis.instance_size == "standard" else "8GB")
+    table.add_row("Resources", f"{cpu} vCPU, {ram} RAM")
+
     table.add_row("Estimated Cost", f"${analysis.estimated_cost_monthly:.2f}/month")
     table.add_row("Confidence", f"{analysis.confidence:.0%}")

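Both tables derive the displayed resources from `instance_size` with the same nested conditionals. For the three documented sizes the mapping collapses to a small lookup; the sketch below is a restatement for readability, not code from the package, and note the inline expressions above treat any unrecognised size like "premium" (4 vCPU / 8GB), whereas this sketch falls back to "basic":

    # Equivalent lookup for the three documented instance sizes.
    RESOURCE_PRESETS = {
        "basic": (1, "1GB"),
        "standard": (2, "4GB"),
        "premium": (4, "8GB"),
    }

    cpu, ram = RESOURCE_PRESETS.get("standard", RESOURCE_PRESETS["basic"])
    print(f"{cpu} vCPU, {ram} RAM")  # -> 2 vCPU, 4GB RAM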
xenfra/utils/config.py
CHANGED

@@ -29,18 +29,8 @@ def read_xenfra_yaml(filename: str = "xenfra.yaml") -> dict:
     Raises:
         FileNotFoundError: If the config file doesn't exist
         yaml.YAMLError: If the YAML is malformed
-
-
-    Read and parse xenfra.yaml configuration file.
-
-    Args:
-        filename: Path to the config file (default: xenfra.yaml)
-
-    Returns:
-        Dictionary containing the configuration
-
-    Raises:
-        FileNotFoundError: If the config file doesn't exist
+        ValueError: If the YAML is invalid
+        IOError: If reading fails
     """
     if not Path(filename).exists():
         raise FileNotFoundError(
@@ -71,6 +61,7 @@ def generate_xenfra_yaml(analysis: CodebaseAnalysisResponse, filename: str = "xe
     config = {
         "name": os.path.basename(os.getcwd()),
         "framework": analysis.framework,
+        "region": "nyc3",  # Default to NYC3
         "port": analysis.port,
     }

@@ -90,8 +81,20 @@ def generate_xenfra_yaml(analysis: CodebaseAnalysisResponse, filename: str = "xe
     if analysis.env_vars and len(analysis.env_vars) > 0:
         config["env_vars"] = analysis.env_vars

-    #
+    # Infrastructure configuration
     config["instance_size"] = analysis.instance_size
+    config["resources"] = {
+        "cpu": 1,
+        "ram": "1GB"
+    }
+
+    # Map resources based on detected size for better defaults
+    if analysis.instance_size == "standard":
+        config["resources"]["cpu"] = 2
+        config["resources"]["ram"] = "4GB"
+    elif analysis.instance_size == "premium":
+        config["resources"]["cpu"] = 4
+        config["resources"]["ram"] = "8GB"

     # Add package manager info (for intelligent diagnosis)
     if analysis.package_manager:
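After these changes, `generate_xenfra_yaml` seeds the config with a region and resource defaults keyed off the detected `instance_size`. A hypothetical illustration of the dict it would build for a standard-sized FastAPI project (the name, port, and framework values are invented here, and keys added elsewhere in the function, such as env_vars and package-manager info, are omitted):

    # Illustrative output only; real values come from the CodebaseAnalysisResponse.
    config = {
        "name": "my-api",                       # basename of the working directory
        "framework": "fastapi",
        "region": "nyc3",                       # new default added in 0.3.8
        "port": 8000,
        "instance_size": "standard",
        "resources": {"cpu": 2, "ram": "4GB"},  # mapped from instance_size above
    }

This is the same file that `deploy()` now reads back via `read_xenfra_yaml()`, so the `resources` block here is what feeds the size-slug resolution shown earlier.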
@@ -162,17 +165,7 @@ def apply_patch(patch: dict, target_file: str = None, create_backup_file: bool =
         raise ValueError(
             f"Invalid patch operation: {operation}. Must be 'add', 'replace', or 'remove'"
         )
-    """
-    Apply a JSON patch to a configuration file with automatic backup.
-
-    Args:
-        patch: Patch object with file, operation, path, value
-        target_file: Optional override for the file to patch
-        create_backup_file: Whether to create a backup before patching (default: True)

-    Returns:
-        Path to the backup file if created, None otherwise
-    """
     file_to_patch = target_file or patch.get("file")

     if not file_to_patch:
@@ -193,7 +186,7 @@ def apply_patch(patch: dict, target_file: str = None, create_backup_file: bool =

     # Apply patch based on operation
     operation = patch.get("operation")
-    path = patch.get("path"
+    path = (patch.get("path") or "").strip("/")
     value = patch.get("value")

     if operation == "add":
@@ -235,7 +228,7 @@ def apply_patch(patch: dict, target_file: str = None, create_backup_file: bool =
            config_data = json.load(f)

        operation = patch.get("operation")
-        path = patch.get("path"
+        path = (patch.get("path") or "").strip("/")
        value = patch.get("value")

        if operation == "add":
@@ -289,7 +282,7 @@ def apply_patch(patch: dict, target_file: str = None, create_backup_file: bool =
            config_data = toml.load(f)

        operation = patch.get("operation")
-        path = patch.get("path"
+        path = (patch.get("path") or "").strip("/")
        value = patch.get("value")

        if operation == "add":
@@ -400,12 +393,21 @@ def manual_prompt_for_config(filename: str = "xenfra.yaml") -> str:
        cache_type = Prompt.ask("Cache type", choices=["redis", "memcached"], default="redis")
        config["cache"] = {"type": cache_type, "env_var": f"{cache_type.upper()}_URL"}

+    # Region
+    config["region"] = Prompt.ask("Region", choices=["nyc3", "sfo3", "ams3", "fra1", "lon1"], default="nyc3")
+
     # Instance size
     instance_size = Prompt.ask(
         "Instance size", choices=["basic", "standard", "premium"], default="basic"
     )
     config["instance_size"] = instance_size

+    # Resources (CPU/RAM)
+    config["resources"] = {
+        "cpu": IntPrompt.ask("CPU (vCPUs)", default=1 if instance_size == "basic" else 2),
+        "ram": Prompt.ask("RAM (e.g., 1GB, 4GB)", default="1GB" if instance_size == "basic" else "4GB"),
+    }
+
     # Environment variables
     add_env = Confirm.ask("Add environment variables?", default=False)
     if add_env:
{xenfra-0.3.6.dist-info → xenfra-0.3.8.dist-info}/RECORD
CHANGED

@@ -2,19 +2,19 @@ xenfra/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 xenfra/commands/__init__.py,sha256=kTTwVnTvoxikyPUhQiyTAbnw4PYafktuE1----TqQoA,43
 xenfra/commands/auth.py,sha256=ecReVCGl7Ys2d77mv_e4mCbs4ug6FLIb3S9dl2FUhr4,4178
 xenfra/commands/auth_device.py,sha256=caD2UdveEZtAFjgjmnA-l5bjbbPONFjXJXgeJN7mhbk,6710
-xenfra/commands/deployments.py,sha256=
-xenfra/commands/intelligence.py,sha256=
+xenfra/commands/deployments.py,sha256=6-wwUno1uEHxse2X2yNhnNoKxdcNFLSBcvEiWkbuv4E,37873
+xenfra/commands/intelligence.py,sha256=_H0t9OJwPbd9E0r1tcMACrt6-UBPrrTII8M47kC_iHA,17496
 xenfra/commands/projects.py,sha256=SAxF_pOr95K6uz35U-zENptKndKxJNZn6bcD45PHcpI,6696
 xenfra/commands/security_cmd.py,sha256=EI5sjX2lcMxgMH-LCFmPVkc9YqadOrcoSgTiKknkVRY,7327
 xenfra/main.py,sha256=2EPPuIdxjhW-I-e-Mc0i2ayeLaSJdmzddNThkXq7B7c,2033
 xenfra/utils/__init__.py,sha256=4ZRYkrb--vzoXjBHG8zRxz2jCXNGtAoKNtkyu2WRI2A,45
 xenfra/utils/auth.py,sha256=9JbFnv0-rdlJF-4hKD2uWd9h9ehqC1iIHee1O5e-3RM,13769
 xenfra/utils/codebase.py,sha256=GMrqhOJWX8q5ZXSLI9P3hJZBpufXMQA3Z4fKh2XSTNo,5949
-xenfra/utils/config.py,sha256=
+xenfra/utils/config.py,sha256=K2k7hxz94dzbxvCw_PDXtq4o1VlmJMTFktlL-F2g5rY,14786
 xenfra/utils/errors.py,sha256=6G91YzzDDNkKHANTgfAMiOiMElEyi57wo6-FzRa4VuQ,4211
 xenfra/utils/security.py,sha256=EA8CIPLt8Y-QP5uZ7c5NuC6ZLRV1aZS8NapS9ix_vok,11479
 xenfra/utils/validation.py,sha256=cvuL_AEFJ2oCoP0abCqoOIABOwz79Gkf-jh_dcFIQlM,6912
-xenfra-0.3.
-xenfra-0.3.
-xenfra-0.3.
-xenfra-0.3.
+xenfra-0.3.8.dist-info/WHEEL,sha256=KSLUh82mDPEPk0Bx0ScXlWL64bc8KmzIPNcpQZFV-6E,79
+xenfra-0.3.8.dist-info/entry_points.txt,sha256=a_2cGhYK__X6eW05Ba8uB6RIM_61c2sHtXsPY8N0mic,45
+xenfra-0.3.8.dist-info/METADATA,sha256=VqoXK6mEDKpitS0Em_3Okgq00fupy19Mp39qUPiloHI,3898
+xenfra-0.3.8.dist-info/RECORD,,
{xenfra-0.3.6.dist-info → xenfra-0.3.8.dist-info}/WHEEL
File without changes

{xenfra-0.3.6.dist-info → xenfra-0.3.8.dist-info}/entry_points.txt
File without changes