tetra-rp: tetra_rp-0.6.0-py3-none-any.whl → tetra_rp-0.24.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tetra_rp/__init__.py +109 -19
- tetra_rp/cli/commands/__init__.py +1 -0
- tetra_rp/cli/commands/apps.py +143 -0
- tetra_rp/cli/commands/build.py +1082 -0
- tetra_rp/cli/commands/build_utils/__init__.py +1 -0
- tetra_rp/cli/commands/build_utils/handler_generator.py +176 -0
- tetra_rp/cli/commands/build_utils/lb_handler_generator.py +309 -0
- tetra_rp/cli/commands/build_utils/manifest.py +430 -0
- tetra_rp/cli/commands/build_utils/mothership_handler_generator.py +75 -0
- tetra_rp/cli/commands/build_utils/scanner.py +596 -0
- tetra_rp/cli/commands/deploy.py +580 -0
- tetra_rp/cli/commands/init.py +123 -0
- tetra_rp/cli/commands/resource.py +108 -0
- tetra_rp/cli/commands/run.py +296 -0
- tetra_rp/cli/commands/test_mothership.py +458 -0
- tetra_rp/cli/commands/undeploy.py +533 -0
- tetra_rp/cli/main.py +97 -0
- tetra_rp/cli/utils/__init__.py +1 -0
- tetra_rp/cli/utils/app.py +15 -0
- tetra_rp/cli/utils/conda.py +127 -0
- tetra_rp/cli/utils/deployment.py +530 -0
- tetra_rp/cli/utils/ignore.py +143 -0
- tetra_rp/cli/utils/skeleton.py +184 -0
- tetra_rp/cli/utils/skeleton_template/.env.example +4 -0
- tetra_rp/cli/utils/skeleton_template/.flashignore +40 -0
- tetra_rp/cli/utils/skeleton_template/.gitignore +44 -0
- tetra_rp/cli/utils/skeleton_template/README.md +263 -0
- tetra_rp/cli/utils/skeleton_template/main.py +44 -0
- tetra_rp/cli/utils/skeleton_template/mothership.py +55 -0
- tetra_rp/cli/utils/skeleton_template/pyproject.toml +58 -0
- tetra_rp/cli/utils/skeleton_template/requirements.txt +1 -0
- tetra_rp/cli/utils/skeleton_template/workers/__init__.py +0 -0
- tetra_rp/cli/utils/skeleton_template/workers/cpu/__init__.py +19 -0
- tetra_rp/cli/utils/skeleton_template/workers/cpu/endpoint.py +36 -0
- tetra_rp/cli/utils/skeleton_template/workers/gpu/__init__.py +19 -0
- tetra_rp/cli/utils/skeleton_template/workers/gpu/endpoint.py +61 -0
- tetra_rp/client.py +136 -33
- tetra_rp/config.py +29 -0
- tetra_rp/core/api/runpod.py +591 -39
- tetra_rp/core/deployment.py +232 -0
- tetra_rp/core/discovery.py +425 -0
- tetra_rp/core/exceptions.py +50 -0
- tetra_rp/core/resources/__init__.py +27 -9
- tetra_rp/core/resources/app.py +738 -0
- tetra_rp/core/resources/base.py +139 -4
- tetra_rp/core/resources/constants.py +21 -0
- tetra_rp/core/resources/cpu.py +115 -13
- tetra_rp/core/resources/gpu.py +182 -16
- tetra_rp/core/resources/live_serverless.py +153 -16
- tetra_rp/core/resources/load_balancer_sls_resource.py +440 -0
- tetra_rp/core/resources/network_volume.py +126 -31
- tetra_rp/core/resources/resource_manager.py +436 -35
- tetra_rp/core/resources/serverless.py +537 -120
- tetra_rp/core/resources/serverless_cpu.py +201 -0
- tetra_rp/core/resources/template.py +1 -59
- tetra_rp/core/utils/constants.py +10 -0
- tetra_rp/core/utils/file_lock.py +260 -0
- tetra_rp/core/utils/http.py +67 -0
- tetra_rp/core/utils/lru_cache.py +75 -0
- tetra_rp/core/utils/singleton.py +36 -1
- tetra_rp/core/validation.py +44 -0
- tetra_rp/execute_class.py +301 -0
- tetra_rp/protos/remote_execution.py +98 -9
- tetra_rp/runtime/__init__.py +1 -0
- tetra_rp/runtime/circuit_breaker.py +274 -0
- tetra_rp/runtime/config.py +12 -0
- tetra_rp/runtime/exceptions.py +49 -0
- tetra_rp/runtime/generic_handler.py +206 -0
- tetra_rp/runtime/lb_handler.py +189 -0
- tetra_rp/runtime/load_balancer.py +160 -0
- tetra_rp/runtime/manifest_fetcher.py +192 -0
- tetra_rp/runtime/metrics.py +325 -0
- tetra_rp/runtime/models.py +73 -0
- tetra_rp/runtime/mothership_provisioner.py +512 -0
- tetra_rp/runtime/production_wrapper.py +266 -0
- tetra_rp/runtime/reliability_config.py +149 -0
- tetra_rp/runtime/retry_manager.py +118 -0
- tetra_rp/runtime/serialization.py +124 -0
- tetra_rp/runtime/service_registry.py +346 -0
- tetra_rp/runtime/state_manager_client.py +248 -0
- tetra_rp/stubs/live_serverless.py +35 -17
- tetra_rp/stubs/load_balancer_sls.py +357 -0
- tetra_rp/stubs/registry.py +145 -19
- {tetra_rp-0.6.0.dist-info → tetra_rp-0.24.0.dist-info}/METADATA +398 -60
- tetra_rp-0.24.0.dist-info/RECORD +99 -0
- {tetra_rp-0.6.0.dist-info → tetra_rp-0.24.0.dist-info}/WHEEL +1 -1
- tetra_rp-0.24.0.dist-info/entry_points.txt +2 -0
- tetra_rp/core/pool/cluster_manager.py +0 -177
- tetra_rp/core/pool/dataclass.py +0 -18
- tetra_rp/core/pool/ex.py +0 -38
- tetra_rp/core/pool/job.py +0 -22
- tetra_rp/core/pool/worker.py +0 -19
- tetra_rp/core/resources/utils.py +0 -50
- tetra_rp/core/utils/json.py +0 -33
- tetra_rp-0.6.0.dist-info/RECORD +0 -39
- /tetra_rp/{core/pool → cli}/__init__.py +0 -0
- {tetra_rp-0.6.0.dist-info → tetra_rp-0.24.0.dist-info}/top_level.txt +0 -0
tetra_rp/cli/commands/resource.py (new file, @@ -0,0 +1,108 @@):

```python
"""Resource management commands."""

import time
import typer
from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from rich.live import Live

from ...core.resources.resource_manager import ResourceManager

console = Console()


def report_command(
    live: bool = typer.Option(False, "--live", "-l", help="Live updating status"),
    refresh: int = typer.Option(
        2, "--refresh", "-r", help="Refresh interval for live mode"
    ),
):
    """Show resource status dashboard."""

    resource_manager = ResourceManager()

    if live:
        try:
            with Live(
                generate_resource_table(resource_manager),
                console=console,
                refresh_per_second=1 / refresh,
                screen=True,
            ) as live_display:
                while True:
                    time.sleep(refresh)
                    live_display.update(generate_resource_table(resource_manager))
        except KeyboardInterrupt:
            console.print("\n📊 Live monitoring stopped")
    else:
        table = generate_resource_table(resource_manager)
        console.print(table)


def generate_resource_table(resource_manager: ResourceManager) -> Panel:
    """Generate a formatted table of resources."""

    resources = resource_manager._resources

    if not resources:
        return Panel(
            "📊 No resources currently tracked\n\n"
            "Resources will appear here after running your Tetra applications.",
            title="Resource Status Report",
            expand=False,
        )

    table = Table(title="Resource Status Report")
    table.add_column("Resource ID", style="cyan", no_wrap=True)
    table.add_column("Status", justify="center")
    table.add_column("Type", style="magenta")
    table.add_column("URL", style="blue")
    table.add_column("Health", justify="center")

    active_count = 0
    error_count = 0

    for uid, resource in resources.items():
        # Determine status
        try:
            is_deployed = resource.is_deployed()
            if is_deployed:
                status = "🟢 Active"
                active_count += 1
            else:
                status = "🔴 Inactive"
                error_count += 1
        except Exception:
            status = "🟡 Unknown"

        # Get resource info
        resource_type = resource.__class__.__name__

        try:
            url = resource.url if hasattr(resource, "url") else "N/A"
        except Exception:
            url = "N/A"

        # Health check (simplified for now)
        health = "✓" if status == "🟢 Active" else "✗"

        table.add_row(
            uid[:20] + "..." if len(uid) > 20 else uid,
            status,
            resource_type,
            url,
            health,
        )

    # Summary
    total = len(resources)
    idle_count = total - active_count - error_count
    summary = f"Total: {total} resources ({active_count} active"
    if idle_count > 0:
        summary += f", {idle_count} idle"
    if error_count > 0:
        summary += f", {error_count} error"
    summary += ")"

    return Panel(table, subtitle=summary, expand=False)
```
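The hunk above adds only the command function; its registration with the CLI is not shown in this diff (it presumably happens in `tetra_rp/cli/main.py`, which is also added in this release). As a rough sketch of how a function like `report_command` would typically be wired into a Typer app, with the app and command names here being illustrative assumptions rather than values taken from the package:

```python
# Hypothetical wiring sketch -- the real registration lives elsewhere in the
# package (likely tetra_rp/cli/main.py) and may use different names.
import typer

from tetra_rp.cli.commands.resource import report_command

app = typer.Typer()

# Typer derives the --live/-l and --refresh/-r options from the
# typer.Option defaults in the function signature shown above.
app.command(name="report")(report_command)

if __name__ == "__main__":
    app()  # e.g. `python cli.py report --live --refresh 5`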
```

tetra_rp/cli/commands/run.py (new file, @@ -0,0 +1,296 @@):

```python
"""Run Flash development server."""

import logging
import os
import signal
import subprocess
import sys
from pathlib import Path
from typing import Optional

import questionary
import typer
from rich.console import Console

logger = logging.getLogger(__name__)
console = Console()


def run_command(
    host: str = typer.Option(
        "localhost",
        "--host",
        envvar="FLASH_HOST",
        help="Host to bind to (env: FLASH_HOST)",
    ),
    port: int = typer.Option(
        8888,
        "--port",
        "-p",
        envvar="FLASH_PORT",
        help="Port to bind to (env: FLASH_PORT)",
    ),
    reload: bool = typer.Option(
        True, "--reload/--no-reload", help="Enable auto-reload"
    ),
    auto_provision: bool = typer.Option(
        False,
        "--auto-provision",
        help="Auto-provision deployable resources on startup",
    ),
):
    """Run Flash development server with uvicorn."""

    # Discover entry point
    entry_point = discover_entry_point()
    if not entry_point:
        console.print("[red]Error:[/red] No entry point found")
        console.print("Create main.py with a FastAPI app")
        raise typer.Exit(1)

    # Check if entry point has FastAPI app
    app_location = check_fastapi_app(entry_point)
    if not app_location:
        console.print(f"[red]Error:[/red] No FastAPI app found in {entry_point}")
        console.print("Make sure your main.py contains: app = FastAPI()")
        raise typer.Exit(1)

    # Set flag for all flash run sessions to ensure both auto-provisioned
    # and on-the-fly provisioned resources get the live- prefix
    if not _is_reload():
        os.environ["FLASH_IS_LIVE_PROVISIONING"] = "true"

    # Auto-provision resources if flag is set and not a reload
    if auto_provision and not _is_reload():
        try:
            resources = _discover_resources(entry_point)

            if resources:
                # If many resources found, ask for confirmation
                if len(resources) > 5:
                    if not _confirm_large_provisioning(resources):
                        console.print("[yellow]Auto-provisioning cancelled[/yellow]\n")
                    else:
                        _provision_resources(resources)
                else:
                    _provision_resources(resources)
        except Exception as e:
            logger.error("Auto-provisioning failed", exc_info=True)
            console.print(
                f"[yellow]Warning:[/yellow] Resource provisioning failed: {e}"
            )
            console.print(
                "[yellow]Note:[/yellow] Resources will be deployed on-demand when first called"
            )

    console.print("\n[green]Starting Flash Server[/green]")
    console.print(f"Entry point: [bold]{app_location}[/bold]")
    console.print(f"Server: [bold]http://{host}:{port}[/bold]")
    console.print(f"Auto-reload: [bold]{'enabled' if reload else 'disabled'}[/bold]")
    console.print("\nPress CTRL+C to stop\n")

    # Build uvicorn command
    cmd = [
        sys.executable,
        "-m",
        "uvicorn",
        app_location,
        "--host",
        host,
        "--port",
        str(port),
        "--log-level",
        "warning",
    ]

    if reload:
        cmd.append("--reload")

    # Run uvicorn with proper process group handling
    process = None
    try:
        # Create new process group to ensure all child processes can be killed together
        # On Unix systems, use process group; on Windows, CREATE_NEW_PROCESS_GROUP
        if sys.platform == "win32":
            process = subprocess.Popen(
                cmd, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP
            )
        else:
            process = subprocess.Popen(cmd, preexec_fn=os.setsid)

        # Wait for process to complete
        process.wait()

    except KeyboardInterrupt:
        console.print("\n[yellow]Stopping server and cleaning up processes...[/yellow]")

        # Kill the entire process group to ensure all child processes are terminated
        if process:
            try:
                if sys.platform == "win32":
                    # Windows: terminate the process
                    process.terminate()
                else:
                    # Unix: kill entire process group
                    os.killpg(os.getpgid(process.pid), signal.SIGTERM)

                # Wait briefly for graceful shutdown
                try:
                    process.wait(timeout=2)
                except subprocess.TimeoutExpired:
                    # Force kill if didn't terminate gracefully
                    if sys.platform == "win32":
                        process.kill()
                    else:
                        os.killpg(os.getpgid(process.pid), signal.SIGKILL)
                    process.wait()

            except (ProcessLookupError, OSError):
                # Process already terminated
                pass

        console.print("[green]Server stopped[/green]")
        raise typer.Exit(0)

    except Exception as e:
        console.print(f"[red]Error:[/red] {e}")
        if process:
            try:
                if sys.platform == "win32":
                    process.terminate()
                else:
                    os.killpg(os.getpgid(process.pid), signal.SIGTERM)
            except (ProcessLookupError, OSError):
                pass
        raise typer.Exit(1)


def discover_entry_point() -> Optional[str]:
    """Discover the main entry point file."""
    candidates = ["main.py", "app.py", "server.py"]

    for candidate in candidates:
        if Path(candidate).exists():
            return candidate

    return None


def check_fastapi_app(entry_point: str) -> Optional[str]:
    """
    Check if entry point has a FastAPI app and return the app location.

    Returns:
        App location in format "module:app" or None
    """
    try:
        # Read the file
        content = Path(entry_point).read_text()

        # Check for FastAPI app
        if "app = FastAPI(" in content or "app=FastAPI(" in content:
            # Extract module name from file path
            module = entry_point.replace(".py", "").replace("/", ".")
            return f"{module}:app"

        return None

    except Exception:
        return None


def _is_reload() -> bool:
    """Check if running in uvicorn reload subprocess.

    Returns:
        True if running in a reload subprocess
    """
    return "UVICORN_RELOADER_PID" in os.environ


def _discover_resources(entry_point: str):
    """Discover deployable resources in entry point.

    Args:
        entry_point: Path to entry point file

    Returns:
        List of discovered DeployableResource instances
    """
    from ...core.discovery import ResourceDiscovery

    try:
        discovery = ResourceDiscovery(entry_point, max_depth=2)
        resources = discovery.discover()

        # Debug: Log what was discovered
        if resources:
            console.print(f"\n[dim]Discovered {len(resources)} resource(s):[/dim]")
            for res in resources:
                res_name = getattr(res, "name", "Unknown")
                res_type = res.__class__.__name__
                console.print(f"  [dim]• {res_name} ({res_type})[/dim]")
            console.print()

        return resources
    except Exception as e:
        console.print(f"[yellow]Warning:[/yellow] Resource discovery failed: {e}")
        return []


def _confirm_large_provisioning(resources) -> bool:
    """Show resources and prompt user for confirmation.

    Args:
        resources: List of resources to provision

    Returns:
        True if user confirms, False otherwise
    """
    try:
        console.print(
            f"\n[yellow]Found {len(resources)} resources to provision:[/yellow]"
        )

        for resource in resources:
            name = getattr(resource, "name", "Unknown")
            resource_type = resource.__class__.__name__
            console.print(f"  • {name} ({resource_type})")

        console.print()

        confirmed = questionary.confirm(
            "This may take several minutes. Do you want to proceed?"
        ).ask()

        return confirmed if confirmed is not None else False

    except (KeyboardInterrupt, EOFError):
        console.print("\n[yellow]Cancelled[/yellow]")
        return False
    except Exception as e:
        console.print(f"[yellow]Warning:[/yellow] Confirmation failed: {e}")
        return False


def _provision_resources(resources):
    """Provision resources and wait for completion.

    Args:
        resources: List of resources to provision
    """
    import asyncio
    from ...core.deployment import DeploymentOrchestrator

    try:
        console.print(f"\n[bold]Provisioning {len(resources)} resource(s)...[/bold]")
        orchestrator = DeploymentOrchestrator(max_concurrent=3)

        # Run provisioning with progress shown
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(orchestrator.deploy_all(resources, show_progress=True))
        loop.close()

    except Exception as e:
        console.print(f"[yellow]Warning:[/yellow] Provisioning failed: {e}")
```