velar-sdk 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- velar_sdk-0.2.0/PKG-INFO +61 -0
- velar_sdk-0.2.0/README.md +27 -0
- velar_sdk-0.2.0/pyproject.toml +49 -0
- velar_sdk-0.2.0/setup.cfg +4 -0
- velar_sdk-0.2.0/velar/__init__.py +10 -0
- velar_sdk-0.2.0/velar/app.py +355 -0
- velar_sdk-0.2.0/velar/cli.py +358 -0
- velar_sdk-0.2.0/velar/client.py +179 -0
- velar_sdk-0.2.0/velar/config.py +39 -0
- velar_sdk-0.2.0/velar/decorators.py +185 -0
- velar_sdk-0.2.0/velar/gpu.py +43 -0
- velar_sdk-0.2.0/velar/image.py +89 -0
- velar_sdk-0.2.0/velar/serialization.py +46 -0
- velar_sdk-0.2.0/velar_sdk.egg-info/PKG-INFO +61 -0
- velar_sdk-0.2.0/velar_sdk.egg-info/SOURCES.txt +17 -0
- velar_sdk-0.2.0/velar_sdk.egg-info/dependency_links.txt +1 -0
- velar_sdk-0.2.0/velar_sdk.egg-info/entry_points.txt +2 -0
- velar_sdk-0.2.0/velar_sdk.egg-info/requires.txt +11 -0
- velar_sdk-0.2.0/velar_sdk.egg-info/top_level.txt +1 -0
velar_sdk-0.2.0/PKG-INFO
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: velar-sdk
|
|
3
|
+
Version: 0.2.0
|
|
4
|
+
Summary: Velar Python SDK - Deploy ML models to GPUs with one command
|
|
5
|
+
Author-email: Velar Labs <hello@velar.run>
|
|
6
|
+
License: Apache-2.0
|
|
7
|
+
Project-URL: Homepage, https://velar.run
|
|
8
|
+
Project-URL: Documentation, https://velar.run/docs
|
|
9
|
+
Project-URL: Repository, https://github.com/velarrun/velar
|
|
10
|
+
Project-URL: Bug Tracker, https://github.com/velarrun/velar/issues
|
|
11
|
+
Keywords: gpu,ml,machine-learning,deployment,cloud
|
|
12
|
+
Classifier: Development Status :: 3 - Alpha
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Intended Audience :: Science/Research
|
|
15
|
+
Classifier: License :: OSI Approved :: Apache Software License
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
21
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
22
|
+
Requires-Python: >=3.9
|
|
23
|
+
Description-Content-Type: text/markdown
|
|
24
|
+
Requires-Dist: httpx>=0.25.0
|
|
25
|
+
Requires-Dist: pydantic>=2.0
|
|
26
|
+
Requires-Dist: click>=8.0
|
|
27
|
+
Requires-Dist: docker>=7.0
|
|
28
|
+
Requires-Dist: rich>=13.0
|
|
29
|
+
Provides-Extra: dev
|
|
30
|
+
Requires-Dist: pytest; extra == "dev"
|
|
31
|
+
Requires-Dist: ruff; extra == "dev"
|
|
32
|
+
Requires-Dist: build; extra == "dev"
|
|
33
|
+
Requires-Dist: twine; extra == "dev"
|
|
34
|
+
|
|
35
|
+
# Velar Python SDK
|
|
36
|
+
|
|
37
|
+
Deploy ML models to GPUs with one command.
|
|
38
|
+
|
|
39
|
+
## Installation
|
|
40
|
+
|
|
41
|
+
```bash
|
|
42
|
+
pip install velar-sdk
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
## Quick Start
|
|
46
|
+
|
|
47
|
+
```python
|
|
48
|
+
import velar
|
|
49
|
+
from velar import App, Image, gpu
|
|
50
|
+
|
|
51
|
+
app = App("my-model")
|
|
52
|
+
|
|
53
|
+
@app.function(gpu=gpu.A100)
|
|
54
|
+
def run_inference(prompt: str) -> str:
|
|
55
|
+
# your model code here
|
|
56
|
+
return result
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
## Documentation
|
|
60
|
+
|
|
61
|
+
[velar.run/docs](https://velar.run/docs)
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
# Velar Python SDK
|
|
2
|
+
|
|
3
|
+
Deploy ML models to GPUs with one command.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install velar-sdk
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick Start
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
import velar
|
|
15
|
+
from velar import App, Image, gpu
|
|
16
|
+
|
|
17
|
+
app = App("my-model")
|
|
18
|
+
|
|
19
|
+
@app.function(gpu=gpu.A100)
|
|
20
|
+
def run_inference(prompt: str) -> str:
|
|
21
|
+
# your model code here
|
|
22
|
+
return result
|
|
23
|
+
```
|
|
24
|
+
|
|
25
|
+
## Documentation
|
|
26
|
+
|
|
27
|
+
[velar.run/docs](https://velar.run/docs)
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "velar-sdk"
|
|
7
|
+
version = "0.2.0"
|
|
8
|
+
description = "Velar Python SDK - Deploy ML models to GPUs with one command"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.9"
|
|
11
|
+
license = { text = "Apache-2.0" }
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "Velar Labs", email = "hello@velar.run" },
|
|
14
|
+
]
|
|
15
|
+
keywords = ["gpu", "ml", "machine-learning", "deployment", "cloud"]
|
|
16
|
+
classifiers = [
|
|
17
|
+
"Development Status :: 3 - Alpha",
|
|
18
|
+
"Intended Audience :: Developers",
|
|
19
|
+
"Intended Audience :: Science/Research",
|
|
20
|
+
"License :: OSI Approved :: Apache Software License",
|
|
21
|
+
"Programming Language :: Python :: 3",
|
|
22
|
+
"Programming Language :: Python :: 3.9",
|
|
23
|
+
"Programming Language :: Python :: 3.10",
|
|
24
|
+
"Programming Language :: Python :: 3.11",
|
|
25
|
+
"Programming Language :: Python :: 3.12",
|
|
26
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
27
|
+
]
|
|
28
|
+
dependencies = [
|
|
29
|
+
"httpx>=0.25.0",
|
|
30
|
+
"pydantic>=2.0",
|
|
31
|
+
"click>=8.0",
|
|
32
|
+
"docker>=7.0",
|
|
33
|
+
"rich>=13.0",
|
|
34
|
+
]
|
|
35
|
+
|
|
36
|
+
[project.optional-dependencies]
|
|
37
|
+
dev = ["pytest", "ruff", "build", "twine"]
|
|
38
|
+
|
|
39
|
+
[project.scripts]
|
|
40
|
+
velar = "velar.cli:cli"
|
|
41
|
+
|
|
42
|
+
[project.urls]
|
|
43
|
+
Homepage = "https://velar.run"
|
|
44
|
+
Documentation = "https://velar.run/docs"
|
|
45
|
+
Repository = "https://github.com/velarrun/velar"
|
|
46
|
+
"Bug Tracker" = "https://github.com/velarrun/velar/issues"
|
|
47
|
+
|
|
48
|
+
[tool.setuptools.packages.find]
|
|
49
|
+
include = ["velar*"]
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
"""Velar Python SDK - Deploy ML models to GPUs with one command."""
|
|
2
|
+
|
|
3
|
+
from velar.app import App
|
|
4
|
+
from velar.decorators import function, endpoint
|
|
5
|
+
from velar.image import Image
|
|
6
|
+
from velar.serialization import serialize, deserialize
|
|
7
|
+
from velar import gpu
|
|
8
|
+
|
|
9
|
+
__all__ = ["App", "function", "endpoint", "gpu", "Image", "serialize", "deserialize"]
|
|
10
|
+
__version__ = "0.2.0"
|
|
@@ -0,0 +1,355 @@
|
|
|
1
|
+
"""App - the main entry point for Velar deployments."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import inspect
|
|
6
|
+
import os
|
|
7
|
+
import subprocess
|
|
8
|
+
import tempfile
|
|
9
|
+
import textwrap
|
|
10
|
+
from typing import Any, Callable
|
|
11
|
+
|
|
12
|
+
from rich.console import Console
|
|
13
|
+
from rich.live import Live
|
|
14
|
+
from rich.spinner import Spinner
|
|
15
|
+
from rich.table import Table
|
|
16
|
+
from rich.text import Text
|
|
17
|
+
|
|
18
|
+
from velar.client import VelarClient
|
|
19
|
+
from velar.config import Config
|
|
20
|
+
from velar.decorators import EndpointSpec, FunctionSpec
|
|
21
|
+
|
|
22
|
+
# Shared Rich console used for all deploy/status output in this module.
console = Console()

# The filename written into the container image for the generated handler.
_HANDLER_FILENAME = "_velar_handler.py"
# Command the platform runs to start the handler; the Dockerfile generated in
# _build_and_push() COPYs the handler script to /app/<filename>.
_HANDLER_ENTRY_COMMAND = f"python /app/{_HANDLER_FILENAME}"
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class App:
|
|
30
|
+
"""A Velar application that groups functions and endpoints.
|
|
31
|
+
|
|
32
|
+
Usage:
|
|
33
|
+
import velar
|
|
34
|
+
|
|
35
|
+
app = velar.App("my-ml-app")
|
|
36
|
+
|
|
37
|
+
image = velar.Image.from_registry("pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime")
|
|
38
|
+
image = image.pip_install("transformers", "accelerate")
|
|
39
|
+
|
|
40
|
+
@app.function(gpu="A100", image=image)
|
|
41
|
+
def train(data):
|
|
42
|
+
import torch
|
|
43
|
+
...
|
|
44
|
+
|
|
45
|
+
@app.endpoint(gpu="A10", image=image)
|
|
46
|
+
def predict(request):
|
|
47
|
+
...
|
|
48
|
+
|
|
49
|
+
# Deploy everything
|
|
50
|
+
app.deploy()
|
|
51
|
+
"""
|
|
52
|
+
|
|
53
|
+
def __init__(self, name: str = "default"):
    """Create an app.

    Args:
        name: Logical app name; used to namespace image tags at deploy time.
    """
    self.name = name
    # Registered specs, keyed by function name (filled by function()/endpoint()).
    self._functions: dict[str, FunctionSpec] = {}
    self._endpoints: dict[str, EndpointSpec] = {}
    # Lazily created API client; see the `client` property.
    self._client: VelarClient | None = None
    # Set by @app.local_entrypoint(); consumed by `velar run`.
    self._local_entrypoint: Callable | None = None
|
|
59
|
+
|
|
60
|
+
def function(self, **kwargs: Any):
    """Register a serverless function with this app.

    Keyword arguments are forwarded unchanged to ``velar.decorators.function``;
    the resulting spec is recorded on this app so ``deploy()`` picks it up.
    """
    # Imported lazily to avoid an import cycle at module load time.
    from velar.decorators import function as fn_decorator

    def register(func):
        registered = fn_decorator(**kwargs)(func)
        self._functions[registered.name] = registered
        return registered

    return register
|
|
70
|
+
|
|
71
|
+
def endpoint(self, **kwargs: Any):
    """Register a persistent endpoint with this app.

    Keyword arguments are forwarded unchanged to ``velar.decorators.endpoint``;
    the resulting spec is recorded on this app so ``deploy()`` picks it up.
    """
    # Imported lazily to avoid an import cycle at module load time.
    from velar.decorators import endpoint as ep_decorator

    def register(func):
        registered = ep_decorator(**kwargs)(func)
        self._endpoints[registered.name] = registered
        return registered

    return register
|
|
81
|
+
|
|
82
|
+
def local_entrypoint(self):
    """Mark a function as the local entry point for ``velar run``.

    Usage::

        @app.local_entrypoint()
        def main():
            result = my_function.remote(some_data)
            print(result)

    Then run with::

        velar run my_module:app
    """

    def mark(func: Callable) -> Callable:
        # Remember the entry point on the app; `velar run` looks it up there.
        self._local_entrypoint = func
        return func

    return mark
|
|
102
|
+
|
|
103
|
+
@property
def client(self) -> VelarClient:
    """Lazily built, cached API client.

    Constructed on first access from environment configuration, so merely
    declaring an App never requires credentials to be present.
    """
    if self._client is None:
        self._client = VelarClient(Config.from_env())
    return self._client
|
|
108
|
+
|
|
109
|
+
def deploy(self) -> dict[str, Any]:
    """Deploy all registered functions and endpoints.

    This:
    1. Generates handler wrappers for each function/endpoint
    2. Builds Docker images (including the handler) for each unique image spec
    3. Pushes images to registry
    4. Creates deployments via the Velar API
    5. Stores deployment_id and client refs on each spec so .remote() works

    Returns:
        Mapping of spec name -> deployment record (as returned by the API)
        for every workload created successfully. Specs whose build or create
        step failed are skipped with a printed message rather than aborting
        the whole deploy.
    """
    all_specs: list[FunctionSpec | EndpointSpec] = [
        *self._functions.values(),
        *self._endpoints.values(),
    ]

    if not all_specs:
        console.print("[yellow]No functions or endpoints registered.[/yellow]")
        return {}

    console.print(f"\n[bold blue]Deploying app '{self.name}'[/bold blue]")
    console.print(f" Functions : {len(self._functions)}")
    console.print(f" Endpoints : {len(self._endpoints)}")
    console.print()

    results: dict[str, Any] = {}

    for spec in all_specs:
        label = f"[bold]{spec.name}[/bold] [dim]({spec.deployment_type}, {spec.gpu.name})[/dim]"
        console.print(label)

        # Step 1: Build and push Docker image (with handler baked in).
        # _build_and_push() prints its own error details and returns None
        # on failure, so here we only announce and move on.
        image_tag = self._build_and_push(spec)
        if not image_tag:
            console.print(f" [red]✗ Failed to build image for {spec.name}[/red]")
            continue

        # Step 2: Create deployment (with spinner). transient=True makes the
        # spinner line disappear once the API call returns.
        with Live(
            Text.assemble((" ⟳ Creating deployment...", "yellow")),
            console=console,
            refresh_per_second=8,
            transient=True,
        ):
            try:
                deployment = self.client.create_deployment(
                    deployment_type=spec.deployment_type,
                    gpu_type=spec.gpu.name,
                    image_url=image_tag,
                    entry_command=_HANDLER_ENTRY_COMMAND,
                    # Specs without a `timeout` attribute fall back to 0.
                    estimated_seconds=getattr(spec, "timeout", 0),
                )
            except Exception as e:
                # Best-effort per spec: report and continue with the rest.
                console.print(f" [red]✗ Failed: {e}[/red]")
                continue

        results[spec.name] = deployment

        # Wire up the spec so .remote() can find the deployment.
        deployment_id = deployment["id"]
        spec.deployment_id = deployment_id
        spec.endpoint_url = deployment.get("endpoint_url")
        spec._client = self.client

        console.print(f" [green]✓ Deployment {deployment_id[:8]}[/green]")

    if results:
        console.print(f"\n[bold green]✓ Deployed {len(results)} workload(s)[/bold green]")

    return results
|
|
178
|
+
|
|
179
|
+
# ------------------------------------------------------------------
|
|
180
|
+
# Handler generation
|
|
181
|
+
# ------------------------------------------------------------------
|
|
182
|
+
|
|
183
|
+
@staticmethod
|
|
184
|
+
def _generate_handler(spec: FunctionSpec | EndpointSpec) -> str:
|
|
185
|
+
"""Generate a Python HTTP handler script for the given spec.
|
|
186
|
+
|
|
187
|
+
The generated script:
|
|
188
|
+
- Embeds the user function source directly (no import needed).
|
|
189
|
+
- Starts a stdlib HTTP server on port 8000.
|
|
190
|
+
- Accepts POST with ``{"args": [...], "kwargs": {...}}``.
|
|
191
|
+
- Returns ``{"result": <return_value>}`` as JSON.
|
|
192
|
+
- Provides a GET health-check at any path.
|
|
193
|
+
"""
|
|
194
|
+
# Extract the raw source of the decorated function and dedent it
|
|
195
|
+
# so it sits at the top level of the generated script.
|
|
196
|
+
func_source = textwrap.dedent(inspect.getsource(spec.func))
|
|
197
|
+
func_name = spec.name
|
|
198
|
+
|
|
199
|
+
parts = [
|
|
200
|
+
f"# Auto-generated Velar handler for function '{func_name}'",
|
|
201
|
+
"# " + "-" * 55,
|
|
202
|
+
"import json",
|
|
203
|
+
"import sys",
|
|
204
|
+
"import traceback",
|
|
205
|
+
"from http.server import HTTPServer, BaseHTTPRequestHandler",
|
|
206
|
+
"",
|
|
207
|
+
"# ---------- user function ----------",
|
|
208
|
+
func_source.rstrip(),
|
|
209
|
+
"# -----------------------------------",
|
|
210
|
+
"",
|
|
211
|
+
"",
|
|
212
|
+
"class _VelarHandler(BaseHTTPRequestHandler):",
|
|
213
|
+
" def do_POST(self):",
|
|
214
|
+
" try:",
|
|
215
|
+
' content_length = int(self.headers.get("Content-Length", 0))',
|
|
216
|
+
" body = json.loads(self.rfile.read(content_length))",
|
|
217
|
+
' args = body.get("args", [])',
|
|
218
|
+
' kwargs = body.get("kwargs", {})',
|
|
219
|
+
f" result = {func_name}(*args, **kwargs)",
|
|
220
|
+
' response = json.dumps({"result": result})',
|
|
221
|
+
" self.send_response(200)",
|
|
222
|
+
" except Exception:",
|
|
223
|
+
' response = json.dumps({"error": traceback.format_exc()})',
|
|
224
|
+
" self.send_response(500)",
|
|
225
|
+
' self.send_header("Content-Type", "application/json")',
|
|
226
|
+
" self.end_headers()",
|
|
227
|
+
" self.wfile.write(response.encode())",
|
|
228
|
+
"",
|
|
229
|
+
" def do_GET(self):",
|
|
230
|
+
" self.send_response(200)",
|
|
231
|
+
' self.send_header("Content-Type", "application/json")',
|
|
232
|
+
" self.end_headers()",
|
|
233
|
+
" self.wfile.write(b'{\"status\":\"ready\"}')",
|
|
234
|
+
"",
|
|
235
|
+
" def log_message(self, fmt, *args):",
|
|
236
|
+
" sys.stderr.write(fmt % args + '\\n')",
|
|
237
|
+
"",
|
|
238
|
+
"",
|
|
239
|
+
'if __name__ == "__main__":',
|
|
240
|
+
' server = HTTPServer(("0.0.0.0", 8000), _VelarHandler)',
|
|
241
|
+
' print("Velar handler listening on 0.0.0.0:8000", flush=True)',
|
|
242
|
+
" server.serve_forever()",
|
|
243
|
+
"",
|
|
244
|
+
]
|
|
245
|
+
return "\n".join(parts)
|
|
246
|
+
|
|
247
|
+
# ------------------------------------------------------------------
|
|
248
|
+
# Docker build helpers
|
|
249
|
+
# ------------------------------------------------------------------
|
|
250
|
+
|
|
251
|
+
def _build_and_push(self, spec: FunctionSpec | EndpointSpec) -> str | None:
    """Build Docker image and push to registry. Returns image tag.

    Returns None on any failure (docker CLI missing, build or push error
    or timeout); an explanatory message is printed in that case.
    """
    # NOTE(review): tags always use :latest, so every redeploy overwrites
    # the previous image for this function — confirm that is intended.
    tag = f"velar/{self.name}/{spec.name}:latest"

    dockerfile_content = spec.image.to_dockerfile()

    # Append a COPY instruction for the generated handler so it lands
    # at /app/_velar_handler.py inside the container.
    dockerfile_content += f"\nCOPY {_HANDLER_FILENAME} /app/{_HANDLER_FILENAME}\n"

    handler_source = self._generate_handler(spec)

    with tempfile.TemporaryDirectory() as tmpdir:
        # Write Dockerfile
        dockerfile_path = os.path.join(tmpdir, "Dockerfile")
        with open(dockerfile_path, "w") as f:
            f.write(dockerfile_content)

        # Write the handler script next to the Dockerfile so COPY works.
        handler_path = os.path.join(tmpdir, _HANDLER_FILENAME)
        with open(handler_path, "w") as f:
            f.write(handler_source)

        with Live(
            Text.assemble((" ⟳ Building image ", "yellow"), (tag, "cyan"), ("...", "yellow")),
            console=console,
            refresh_per_second=8,
            transient=True,
        ):
            try:
                # Build with the temp dir as context: only the Dockerfile and
                # the handler script exist there, keeping the context tiny.
                result = subprocess.run(
                    ["docker", "build", "-t", tag, "-f", dockerfile_path, tmpdir],
                    capture_output=True,
                    text=True,
                    timeout=300,
                )
            except FileNotFoundError:
                # The docker CLI itself is not installed / not on PATH.
                console.print(" [red]✗ Docker not found. Install Docker to deploy.[/red]")
                return None
            except subprocess.TimeoutExpired:
                console.print(" [red]✗ Docker build timed out (5 min)[/red]")
                return None

        if result.returncode != 0:
            # Only the first 400 chars of stderr, to keep output readable.
            console.print(f" [red]✗ Docker build failed:[/red]\n{result.stderr[:400]}")
            return None
        console.print(f" [green]✓ Image built[/green] [dim]{tag}[/dim]")

        with Live(
            Text.assemble((" ⟳ Pushing image...", "yellow")),
            console=console,
            refresh_per_second=8,
            transient=True,
        ):
            try:
                result = subprocess.run(
                    ["docker", "push", tag],
                    capture_output=True,
                    text=True,
                    timeout=300,
                )
            except subprocess.TimeoutExpired:
                console.print(" [red]✗ Docker push timed out (5 min)[/red]")
                return None

        if result.returncode != 0:
            console.print(f" [red]✗ Docker push failed:[/red] {result.stderr[:200]}")
            return None
        console.print(" [green]✓ Image pushed[/green]")

        return tag
|
|
322
|
+
|
|
323
|
+
def status(self) -> None:
    """Print a Rich table of deployments visible to the API client.

    NOTE(review): the listing comes straight from client.list_deployments()
    and does not appear to be filtered to this app — confirm intended.
    """
    deployments = self.client.list_deployments()

    table = Table(title=f"Deployments - {self.name}")
    table.add_column("ID", style="cyan")
    table.add_column("Type")
    table.add_column("GPU")
    table.add_column("Status")
    table.add_column("Cost")

    for d in deployments:
        # Color-code the lifecycle state; unknown states render plain white.
        status_color = {
            "running": "green",
            "pending": "yellow",
            "provisioning": "yellow",
            "completed": "blue",
            "failed": "red",
            "cancelled": "dim",
        }.get(d["status"], "white")

        # Bug fix: a legitimately free deployment has actual_cost == 0.0,
        # which is falsy — the previous `if actual` check wrongly fell back
        # to the reserved estimate. Only fall back when the field is absent.
        actual = d.get("actual_cost")
        if actual is not None:
            cost_str = f"${actual:.4f}"
        else:
            cost_str = f"~${d.get('cost_reserved', 0):.4f}"

        table.add_row(
            d["id"][:8],
            d["type"],
            d["gpu_type"],
            f"[{status_color}]{d['status']}[/{status_color}]",
            cost_str,
        )

    console.print(table)
|