xenfra 0.3.7__py3-none-any.whl → 0.3.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra/commands/deployments.py +252 -64
- xenfra/commands/intelligence.py +18 -0
- xenfra/utils/config.py +23 -1
- {xenfra-0.3.7.dist-info → xenfra-0.3.8.dist-info}/METADATA +1 -1
- {xenfra-0.3.7.dist-info → xenfra-0.3.8.dist-info}/RECORD +7 -7
- {xenfra-0.3.7.dist-info → xenfra-0.3.8.dist-info}/WHEEL +0 -0
- {xenfra-0.3.7.dist-info → xenfra-0.3.8.dist-info}/entry_points.txt +0 -0
xenfra/commands/deployments.py
CHANGED
@@ -14,7 +14,7 @@ from xenfra_sdk.privacy import scrub_logs
 
 from ..utils.auth import API_BASE_URL, get_auth_token
 from ..utils.codebase import has_xenfra_config
-from ..utils.config import apply_patch
+from ..utils.config import apply_patch, read_xenfra_yaml
 from ..utils.validation import (
     validate_branch_name,
     validate_deployment_id,
@@ -23,6 +23,12 @@ from ..utils.validation import (
     validate_project_name,
 )
 
+import time
+from datetime import datetime
+
+from rich.live import Live
+from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn
+
 console = Console()
 
 # Maximum number of retry attempts for auto-healing
@@ -59,6 +65,186 @@ def show_patch_preview(patch_data: dict):
     console.print()
 
 
+def _stream_deployment(client: XenfraClient, project_name: str, git_repo: str, branch: str, framework: str, region: str, size: str):
+    """
+    Creates deployment with real-time SSE streaming (no polling needed).
+
+    Returns tuple of (status, deployment_id, logs_buffer)
+    """
+    console.print(Panel(
+        f"[bold cyan]Project:[/bold cyan] {project_name}\n"
+        f"[bold cyan]Mode:[/bold cyan] Real-time Streaming Deployment",
+        title="[bold green]🚀 Deployment Starting[/bold green]",
+        border_style="green"
+    ))
+
+    deployment_id = None
+    logs_buffer = []
+    status_val = "PENDING"
+
+    try:
+        for event in client.deployments.create_stream(
+            project_name=project_name,
+            git_repo=git_repo,
+            branch=branch,
+            framework=framework,
+            region=region,
+            size_slug=size,
+        ):
+            event_type = event.get("event", "message")
+            data = event.get("data", "")
+
+            if event_type == "deployment_created":
+                # Extract deployment ID
+                if isinstance(data, dict):
+                    deployment_id = data.get("deployment_id")
+                    console.print(f"[bold green]✓[/bold green] Deployment created: [cyan]{deployment_id}[/cyan]\n")
+
+            elif event_type == "log":
+                # Real-time log output
+                log_line = str(data)
+                logs_buffer.append(log_line)
+
+                # Colorize output
+                if any(x in log_line for x in ["ERROR", "FAILED", "✗"]):
+                    console.print(f"[bold red]{log_line}[/bold red]")
+                elif any(x in log_line for x in ["WARN", "WARNING", "⚠"]):
+                    console.print(f"[yellow]{log_line}[/yellow]")
+                elif any(x in log_line for x in ["SUCCESS", "COMPLETED", "✓", "passed!"]):
+                    console.print(f"[bold green]{log_line}[/bold green]")
+                elif "PHASE" in log_line:
+                    console.print(f"\n[bold blue]{log_line}[/bold blue]")
+                elif "[InfraEngine]" in log_line or "[INFO]" in log_line:
+                    console.print(f"[cyan]›[/cyan] {log_line}")
+                else:
+                    console.print(f"[dim]{log_line}[/dim]")
+
+            elif event_type == "error":
+                error_msg = str(data)
+                logs_buffer.append(f"ERROR: {error_msg}")
+                console.print(f"\n[bold red]❌ Error: {error_msg}[/bold red]")
+                status_val = "FAILED"
+
+            elif event_type == "deployment_complete":
+                # Final status
+                if isinstance(data, dict):
+                    status_val = data.get("status", "UNKNOWN")
+                    ip_address = data.get("ip_address")
+
+                    console.print()
+                    if status_val == "SUCCESS":
+                        console.print("[bold green]✨ SUCCESS: Your application is live![/bold green]")
+                        if ip_address and ip_address != "unknown":
+                            console.print(f"[bold]Accessible at:[/bold] [link=http://{ip_address}]http://{ip_address}[/link]")
+                    elif status_val == "FAILED":
+                        console.print("[bold red]❌ DEPLOYMENT FAILED[/bold red]")
+                        error = data.get("error")
+                        if error:
+                            console.print(f"[red]Error: {error}[/red]")
+                break
+
+    except Exception as e:
+        console.print(f"\n[bold red]❌ Streaming error: {e}[/bold red]")
+        status_val = "FAILED"
+        logs_buffer.append(f"Streaming error: {e}")
+
+    return (status_val, deployment_id, "\n".join(logs_buffer))
+
+
+def _follow_deployment(client: XenfraClient, deployment_id: str):
+    """
+    Polls logs and status in real-time until completion with CI/CD style output.
+    (LEGACY - Used for backward compatibility)
+    """
+    console.print(Panel(
+        f"[bold cyan]Deployment ID:[/bold cyan] {deployment_id}\n"
+        f"[bold cyan]Mode:[/bold cyan] Streaming Real-time Infrastructure Logs",
+        title="[bold green]🚀 Deployment Monitor[/bold green]",
+        border_style="green"
+    ))
+
+    last_log_len = 0
+    status_val = "PENDING"
+
+    # Use a live display for the progress bar at the bottom
+    with Progress(
+        SpinnerColumn(),
+        TextColumn("[bold blue]{task.description}"),
+        BarColumn(bar_width=40),
+        TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+        console=console,
+        transient=False,
+    ) as progress:
+        task = progress.add_task("Waiting for server response...", total=100)
+
+        while status_val not in ["SUCCESS", "FAILED", "CANCELLED"]:
+            try:
+                # 1. Update Status
+                dep_status = client.deployments.get_status(deployment_id)
+                status_val = dep_status.get("status", "PENDING")
+                progress_val = dep_status.get("progress", 0)
+                state = dep_status.get("state", "preparing")
+
+                # Use a more descriptive description for the progress task
+                desc = f"Phase: {state}"
+                if status_val == "FAILED":
+                    desc = "[bold red]FAILED[/bold red]"
+                elif status_val == "SUCCESS":
+                    desc = "[bold green]SUCCESS[/bold green]"
+
+                progress.update(task, completed=progress_val, description=desc)
+
+                # 2. Update Logs
+                log_content = client.deployments.get_logs(deployment_id)
+                if log_content and len(log_content) > last_log_len:
+                    new_logs = log_content[last_log_len:].strip()
+                    for line in new_logs.split("\n"):
+                        # Process and colorize lines
+                        clean_line = line.strip()
+                        if not clean_line:
+                            continue
+
+                        if any(x in clean_line for x in ["ERROR", "FAILED", "✗"]):
+                            progress.console.print(f"[bold red]{clean_line}[/bold red]")
+                        elif any(x in clean_line for x in ["WARN", "WARNING", "⚠"]):
+                            progress.console.print(f"[yellow]{clean_line}[/yellow]")
+                        elif any(x in clean_line for x in ["SUCCESS", "COMPLETED", "✓", "passed!"]):
+                            progress.console.print(f"[bold green]{clean_line}[/bold green]")
+                        elif "PHASE" in clean_line:
+                            progress.console.print(f"\n[bold blue]{clean_line}[/bold blue]")
+                        elif "[InfraEngine]" in clean_line:
+                            progress.console.print(f"[dim]{clean_line}[/dim]")
+                        else:
+                            progress.console.print(f"[cyan]›[/cyan] {clean_line}")
+
+                    last_log_len = len(log_content)
+
+                if status_val in ["SUCCESS", "FAILED", "CANCELLED"]:
+                    break
+
+                time.sleep(1.5)  # Slightly faster polling for better feel
+            except Exception as e:
+                # progress.console.print(f"[dim]Transient connection issue: {e}[/dim]")
+                time.sleep(3)
+                continue
+
+    console.print()
+    if status_val == "SUCCESS":
+        console.print("[bold green]✨ SUCCESS: Your application is live![/bold green]")
+        # Try to get the IP address
+        try:
+            final_status = client.deployments.get_status(deployment_id)
+            ip = final_status.get("ip_address")
+            if ip:
+                console.print(f"[bold]Accessible at:[/bold] [link=http://{ip}]http://{ip}[/link]")
+        except:
+            pass
+    elif status_val == "FAILED":
+        console.print("\n[bold red]❌ FAILURE DETECTED: Entering AI Diagnosis Mode...[/bold red]")
+
+    return status_val
+
+
 def zen_nod_workflow(logs: str, client: XenfraClient, attempt: int) -> bool:
     """
     Execute the Zen Nod auto-healing workflow.
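Note (illustrative): both helpers above colorize log lines with the same keyword checks. As a minimal standalone sketch of that rule, with the branch order of the streaming helper (the polling helper differs slightly for "[InfraEngine]" lines) and a hypothetical function name; keywords and styles are copied from the diff:

# Sketch only: keyword-to-style rule mirrored from _stream_deployment above.
def classify_log_line(line: str) -> str:
    """Return the Rich style the CLI would use for a deployment log line."""
    if any(x in line for x in ["ERROR", "FAILED", "✗"]):
        return "bold red"
    if any(x in line for x in ["WARN", "WARNING", "⚠"]):
        return "yellow"
    if any(x in line for x in ["SUCCESS", "COMPLETED", "✓", "passed!"]):
        return "bold green"
    if "PHASE" in line:
        return "bold blue"
    if "[InfraEngine]" in line or "[INFO]" in line:
        return "cyan"
    return "dim"

# Hypothetical sample inputs, purely for illustration:
for sample in ["PHASE 1: Provisioning", "WARNING: slow mirror", "ERROR: build failed"]:
    print(sample, "->", classify_log_line(sample))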
@@ -74,6 +260,12 @@ def zen_nod_workflow(logs: str, client: XenfraClient, attempt: int) -> bool:
     console.print()
     console.print(f"[cyan]🤖 Analyzing failure (attempt {attempt}/{MAX_RETRY_ATTEMPTS})...[/cyan]")
 
+    # Slice logs to last 300 lines for focused diagnosis (Fix #26)
+    log_lines = logs.split("\n")
+    if len(log_lines) > 300:
+        logs = "\n".join(log_lines[-300:])
+        console.print("[dim]Note: Analyzing only the last 300 lines of logs for efficiency.[/dim]")
+
     # Scrub sensitive data from logs
     scrubbed_logs = scrub_logs(logs)
 
@@ -120,8 +312,10 @@ def zen_nod_workflow(logs: str, client: XenfraClient, attempt: int) -> bool:
 @click.option("--git-repo", help="Git repository URL (if deploying from git)")
 @click.option("--branch", default="main", help="Git branch (default: main)")
 @click.option("--framework", help="Framework override (fastapi, flask, django)")
+@click.option("--region", help="DigitalOcean region override")
+@click.option("--size", help="DigitalOcean size slug override")
 @click.option("--no-heal", is_flag=True, help="Disable auto-healing on failure")
-def deploy(project_name, git_repo, branch, framework, no_heal):
+def deploy(project_name, git_repo, branch, framework, region, size, no_heal):
     """
     Deploy current project to DigitalOcean with auto-healing.
 
@@ -150,6 +344,35 @@ def deploy(project_name, git_repo, branch, framework, no_heal):
         console.print("[dim]Deployment cancelled.[/dim]")
         return
 
+    # Load configuration from xenfra.yaml if it exists
+    config = {}
+    if has_xenfra_config():
+        try:
+            config = read_xenfra_yaml()
+        except Exception as e:
+            console.print(f"[yellow]Warning: Could not read xenfra.yaml: {e}[/dim]")
+
+    # Resolve values with precedence: 1. CLI Flag, 2. xenfra.yaml, 3. Default
+    project_name = project_name or config.get("name") or os.path.basename(os.getcwd())
+    framework = framework or config.get("framework")
+    region = region or config.get("region") or "nyc3"
+
+    # Resolve size slug (complex mapping)
+    if not size:
+        if config.get("size"):
+            size = config.get("size")
+        else:
+            instance_size = config.get("instance_size", "basic")
+            resources = config.get("resources", {})
+            cpu = resources.get("cpu", 1)
+
+            if instance_size == "standard" or cpu >= 2:
+                size = "s-2vcpu-4gb"
+            elif instance_size == "premium" or cpu >= 4:
+                size = "s-4vcpu-8gb"
+            else:
+                size = "s-1vcpu-1gb"
+
     # Default project name to current directory
     if not project_name:
         project_name = os.path.basename(os.getcwd())
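Note (illustrative): the resolution added above follows a strict precedence (CLI flag, then xenfra.yaml, then a hard-coded default) and maps instance_size / cpu onto a DigitalOcean size slug. A minimal sketch of that mapping as a pure function, assuming config is the dict returned by read_xenfra_yaml(); the helper name resolve_droplet_size is hypothetical:

# Sketch only: precedence and slug mapping mirrored from the deploy() hunk above.
def resolve_droplet_size(cli_size, config):
    if cli_size:                                   # 1. CLI flag wins
        return cli_size
    if config.get("size"):                         # 2. explicit slug in xenfra.yaml
        return config["size"]
    instance_size = config.get("instance_size", "basic")
    cpu = config.get("resources", {}).get("cpu", 1)
    if instance_size == "standard" or cpu >= 2:    # 3. mapped from instance_size / cpu
        return "s-2vcpu-4gb"
    if instance_size == "premium" or cpu >= 4:
        return "s-4vcpu-8gb"
    return "s-1vcpu-1gb"

print(resolve_droplet_size(None, {"instance_size": "standard"}))  # s-2vcpu-4gb
print(resolve_droplet_size("s-8vcpu-16gb", {}))                   # s-8vcpu-16gb (flag wins)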
@@ -211,67 +434,34 @@ def deploy(project_name, git_repo, branch, framework, no_heal):
             console.print("[dim]Auto-detecting framework...[/dim]")
             framework = "fastapi"  # Default for now
 
-        # Create deployment
+        # Create deployment with real-time streaming
         try:
-
-
-
-
-
-
-
-
-            ) as progress:
-                # Track deployment phases
-                task = progress.add_task("Creating deployment...", total=100)
-
-                deployment = client.deployments.create(
-                    project_name=project_name,
-                    git_repo=git_repo,
-                    branch=branch,
-                    framework=framework,
-                )
-
-                progress.update(task, advance=100, description="Deployment created!")
-
-                deployment_id = deployment["deployment_id"]
-                console.print(
-                    f"[bold green]✓[/bold green] Deployment created: [cyan]{deployment_id}[/cyan]"
+            status_result, deployment_id, logs_data = _stream_deployment(
+                client=client,
+                project_name=project_name,
+                git_repo=git_repo,
+                branch=branch,
+                framework=framework,
+                region=region,
+                size=size,
             )
 
-
-
-
-            details_table.add_column("Value", style="white")
+            if status_result == "FAILED" and not no_heal:
+                # Hand off to the Zen Nod AI Agent
+                should_retry = zen_nod_workflow(logs_data, client, attempt)
 
-
-
-
-
-
+                if should_retry:
+                    # The agent applied a fix, loop back for attempt + 1
+                    continue
+                else:
+                    # Agent couldn't fix it or user declined
+                    raise click.Abort()
+
+            # If we got here with success, break the retry loop
+            if status_result == "SUCCESS":
+                break
             else:
-
-                details_table.add_row("Framework", framework)
-                details_table.add_row("Status", deployment.get("status", "PENDING"))
-
-                panel = Panel(
-                    details_table,
-                    title="[bold green]Deployment Started[/bold green]",
-                    border_style="green",
-                )
-                console.print(panel)
-
-                # Show next steps
-                console.print("\n[bold]Next steps:[/bold]")
-                console.print(f" • Monitor status: [cyan]xenfra status {deployment_id}[/cyan]")
-                console.print(f" • View logs: [cyan]xenfra logs {deployment_id}[/cyan]")
-                if not no_heal:
-                    console.print(
-                        f" • Diagnose issues: [cyan]xenfra diagnose {deployment_id}[/cyan]"
-                    )
-
-                # Success - break out of retry loop
-                break
+                raise click.Abort()
 
         except XenfraAPIError as e:
             # Deployment failed - try to provide helpful error
@@ -373,9 +563,8 @@ def logs(deployment_id, follow, tail):
     console.print(f"\n[bold]Logs for deployment {deployment_id}:[/bold]\n")
 
     if follow:
-
-
-        )
+        _follow_deployment(client, deployment_id)
+        return
 
     # Display logs
     for line in log_lines:
@@ -427,9 +616,8 @@ def status(deployment_id, watch):
     deployment_status = client.deployments.get_status(deployment_id)
 
     if watch:
-
-
-        )
+        _follow_deployment(client, deployment_id)
+        return
 
     # Display status
     status_value = deployment_status.get("status", "UNKNOWN")
xenfra/commands/intelligence.py
CHANGED
@@ -190,7 +190,16 @@ def init(manual, accept_all):
     table.add_row("Workers", ", ".join(analysis.workers))
     table.add_row("Package Manager", selected_package_manager)
     table.add_row("Dependency File", selected_dependency_file)
+
+    # New: Infrastructure details in summary
+    table.add_row("Region", "nyc3 (default)")
     table.add_row("Instance Size", analysis.instance_size)
+
+    # Resource visualization
+    cpu = 1 if analysis.instance_size == "basic" else (2 if analysis.instance_size == "standard" else 4)
+    ram = "1GB" if analysis.instance_size == "basic" else ("4GB" if analysis.instance_size == "standard" else "8GB")
+    table.add_row("Resources", f"{cpu} vCPU, {ram} RAM")
+
     table.add_row("Estimated Cost", f"${analysis.estimated_cost_monthly:.2f}/month")
     table.add_row("Confidence", f"{analysis.confidence:.0%}")
 
@@ -371,7 +380,16 @@ def analyze():
     table.add_row("Workers", ", ".join(analysis.workers))
     if analysis.env_vars:
         table.add_row("Environment Variables", ", ".join(analysis.env_vars))
+
+    # New: Infrastructure details in preview
+    table.add_row("Region", "nyc3 (default)")
     table.add_row("Instance Size", analysis.instance_size)
+
+    # Resource visualization
+    cpu = 1 if analysis.instance_size == "basic" else (2 if analysis.instance_size == "standard" else 4)
+    ram = "1GB" if analysis.instance_size == "basic" else ("4GB" if analysis.instance_size == "standard" else "8GB")
+    table.add_row("Resources", f"{cpu} vCPU, {ram} RAM")
+
     table.add_row("Estimated Cost", f"${analysis.estimated_cost_monthly:.2f}/month")
     table.add_row("Confidence", f"{analysis.confidence:.0%}")
 
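Note (illustrative): the new Region and Resources rows in both tables are derived purely from analysis.instance_size. A short sketch of what those rows evaluate to per tier; the loop is illustrative, the ternaries are copied from the diff above:

# Sketch only: per-tier values behind the new "Region" / "Resources" rows.
for instance_size in ("basic", "standard", "premium"):
    cpu = 1 if instance_size == "basic" else (2 if instance_size == "standard" else 4)
    ram = "1GB" if instance_size == "basic" else ("4GB" if instance_size == "standard" else "8GB")
    print(f"{instance_size:>8} -> Region: nyc3 (default), Resources: {cpu} vCPU, {ram} RAM")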
xenfra/utils/config.py
CHANGED
@@ -61,6 +61,7 @@ def generate_xenfra_yaml(analysis: CodebaseAnalysisResponse, filename: str = "xe
     config = {
         "name": os.path.basename(os.getcwd()),
         "framework": analysis.framework,
+        "region": "nyc3", # Default to NYC3
         "port": analysis.port,
     }
 
@@ -80,8 +81,20 @@ def generate_xenfra_yaml(analysis: CodebaseAnalysisResponse, filename: str = "xe
     if analysis.env_vars and len(analysis.env_vars) > 0:
         config["env_vars"] = analysis.env_vars
 
-    #
+    # Infrastructure configuration
     config["instance_size"] = analysis.instance_size
+    config["resources"] = {
+        "cpu": 1,
+        "ram": "1GB"
+    }
+
+    # Map resources based on detected size for better defaults
+    if analysis.instance_size == "standard":
+        config["resources"]["cpu"] = 2
+        config["resources"]["ram"] = "4GB"
+    elif analysis.instance_size == "premium":
+        config["resources"]["cpu"] = 4
+        config["resources"]["ram"] = "8GB"
 
     # Add package manager info (for intelligent diagnosis)
     if analysis.package_manager:
@@ -380,12 +393,21 @@ def manual_prompt_for_config(filename: str = "xenfra.yaml") -> str:
         cache_type = Prompt.ask("Cache type", choices=["redis", "memcached"], default="redis")
         config["cache"] = {"type": cache_type, "env_var": f"{cache_type.upper()}_URL"}
 
+    # Region
+    config["region"] = Prompt.ask("Region", choices=["nyc3", "sfo3", "ams3", "fra1", "lon1"], default="nyc3")
+
     # Instance size
     instance_size = Prompt.ask(
         "Instance size", choices=["basic", "standard", "premium"], default="basic"
     )
     config["instance_size"] = instance_size
 
+    # Resources (CPU/RAM)
+    config["resources"] = {
+        "cpu": IntPrompt.ask("CPU (vCPUs)", default=1 if instance_size == "basic" else 2),
+        "ram": Prompt.ask("RAM (e.g., 1GB, 4GB)", default="1GB" if instance_size == "basic" else "4GB"),
+    }
+
     # Environment variables
     add_env = Confirm.ask("Add environment variables?", default=False)
     if add_env:
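Note (illustrative): taken together, generate_xenfra_yaml now seeds region, instance_size, and resources into the generated file. A sketch of the dict it would assemble for a hypothetical project whose analysis reports instance_size == "standard"; name, framework, and port are placeholder values, while the keys and the 2 vCPU / 4GB mapping come from the diff above:

# Sketch only: shape of the config dict for a hypothetical "standard" analysis.
config = {
    "name": "my-app",          # placeholder: basename of the project directory
    "framework": "fastapi",    # placeholder: detected framework
    "region": "nyc3",          # new default added in this release
    "port": 8000,              # placeholder: detected port
    "instance_size": "standard",
    "resources": {"cpu": 2, "ram": "4GB"},  # mapped from instance_size
}
print(config)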
{xenfra-0.3.7.dist-info → xenfra-0.3.8.dist-info}/RECORD
CHANGED
@@ -2,19 +2,19 @@ xenfra/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 xenfra/commands/__init__.py,sha256=kTTwVnTvoxikyPUhQiyTAbnw4PYafktuE1----TqQoA,43
 xenfra/commands/auth.py,sha256=ecReVCGl7Ys2d77mv_e4mCbs4ug6FLIb3S9dl2FUhr4,4178
 xenfra/commands/auth_device.py,sha256=caD2UdveEZtAFjgjmnA-l5bjbbPONFjXJXgeJN7mhbk,6710
-xenfra/commands/deployments.py,sha256=
-xenfra/commands/intelligence.py,sha256=
+xenfra/commands/deployments.py,sha256=6-wwUno1uEHxse2X2yNhnNoKxdcNFLSBcvEiWkbuv4E,37873
+xenfra/commands/intelligence.py,sha256=_H0t9OJwPbd9E0r1tcMACrt6-UBPrrTII8M47kC_iHA,17496
 xenfra/commands/projects.py,sha256=SAxF_pOr95K6uz35U-zENptKndKxJNZn6bcD45PHcpI,6696
 xenfra/commands/security_cmd.py,sha256=EI5sjX2lcMxgMH-LCFmPVkc9YqadOrcoSgTiKknkVRY,7327
 xenfra/main.py,sha256=2EPPuIdxjhW-I-e-Mc0i2ayeLaSJdmzddNThkXq7B7c,2033
 xenfra/utils/__init__.py,sha256=4ZRYkrb--vzoXjBHG8zRxz2jCXNGtAoKNtkyu2WRI2A,45
 xenfra/utils/auth.py,sha256=9JbFnv0-rdlJF-4hKD2uWd9h9ehqC1iIHee1O5e-3RM,13769
 xenfra/utils/codebase.py,sha256=GMrqhOJWX8q5ZXSLI9P3hJZBpufXMQA3Z4fKh2XSTNo,5949
-xenfra/utils/config.py,sha256=
+xenfra/utils/config.py,sha256=K2k7hxz94dzbxvCw_PDXtq4o1VlmJMTFktlL-F2g5rY,14786
 xenfra/utils/errors.py,sha256=6G91YzzDDNkKHANTgfAMiOiMElEyi57wo6-FzRa4VuQ,4211
 xenfra/utils/security.py,sha256=EA8CIPLt8Y-QP5uZ7c5NuC6ZLRV1aZS8NapS9ix_vok,11479
 xenfra/utils/validation.py,sha256=cvuL_AEFJ2oCoP0abCqoOIABOwz79Gkf-jh_dcFIQlM,6912
-xenfra-0.3.
-xenfra-0.3.
-xenfra-0.3.
-xenfra-0.3.
+xenfra-0.3.8.dist-info/WHEEL,sha256=KSLUh82mDPEPk0Bx0ScXlWL64bc8KmzIPNcpQZFV-6E,79
+xenfra-0.3.8.dist-info/entry_points.txt,sha256=a_2cGhYK__X6eW05Ba8uB6RIM_61c2sHtXsPY8N0mic,45
+xenfra-0.3.8.dist-info/METADATA,sha256=VqoXK6mEDKpitS0Em_3Okgq00fupy19Mp39qUPiloHI,3898
+xenfra-0.3.8.dist-info/RECORD,,
{xenfra-0.3.7.dist-info → xenfra-0.3.8.dist-info}/WHEEL
File without changes

{xenfra-0.3.7.dist-info → xenfra-0.3.8.dist-info}/entry_points.txt
File without changes