oneinfer-cli 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oneinfer_cli-0.1.0/.gitignore +221 -0
- oneinfer_cli-0.1.0/PKG-INFO +53 -0
- oneinfer_cli-0.1.0/README.md +40 -0
- oneinfer_cli-0.1.0/pyproject.toml +26 -0
- oneinfer_cli-0.1.0/src/oneinfer_cli/auth.py +70 -0
- oneinfer_cli-0.1.0/src/oneinfer_cli/main.py +78 -0
- oneinfer_cli-0.1.0/src/oneinfer_cli/ssh_utils.py +70 -0
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
# Byte-compiled / optimized / DLL files
|
|
2
|
+
__pycache__
|
|
3
|
+
*.py[cod]
|
|
4
|
+
*$py.class
|
|
5
|
+
|
|
6
|
+
# C extensions
|
|
7
|
+
*.so
|
|
8
|
+
|
|
9
|
+
# Distribution / packaging
|
|
10
|
+
.Python
|
|
11
|
+
build/
|
|
12
|
+
develop-eggs/
|
|
13
|
+
dist/
|
|
14
|
+
downloads/
|
|
15
|
+
eggs/
|
|
16
|
+
.eggs/
|
|
17
|
+
lib/
|
|
18
|
+
lib64/
|
|
19
|
+
parts/
|
|
20
|
+
sdist/
|
|
21
|
+
var/
|
|
22
|
+
wheels/
|
|
23
|
+
pip-wheel-metadata/
|
|
24
|
+
share/python-wheels/
|
|
25
|
+
*.egg-info/
|
|
26
|
+
.installed.cfg
|
|
27
|
+
*.egg
|
|
28
|
+
MANIFEST
|
|
29
|
+
|
|
30
|
+
# PyInstaller
|
|
31
|
+
# Usually these files are written by a python script from a template
|
|
32
|
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
|
33
|
+
*.manifest
|
|
34
|
+
*.spec
|
|
35
|
+
|
|
36
|
+
# Installer logs
|
|
37
|
+
pip-log.txt
|
|
38
|
+
pip-delete-this-directory.txt
|
|
39
|
+
|
|
40
|
+
# Unit test / coverage reports
|
|
41
|
+
htmlcov/
|
|
42
|
+
.tox/
|
|
43
|
+
.nox/
|
|
44
|
+
.coverage
|
|
45
|
+
.coverage.*
|
|
46
|
+
.cache
|
|
47
|
+
nosetests.xml
|
|
48
|
+
coverage.xml
|
|
49
|
+
*.cover
|
|
50
|
+
*.py,cover
|
|
51
|
+
.hypothesis/
|
|
52
|
+
.pytest_cache/
|
|
53
|
+
|
|
54
|
+
# Translations
|
|
55
|
+
*.mo
|
|
56
|
+
*.pot
|
|
57
|
+
|
|
58
|
+
# Django stuff:
|
|
59
|
+
*.log
|
|
60
|
+
local_settings.py
|
|
61
|
+
db.sqlite3
|
|
62
|
+
db.sqlite3-journal
|
|
63
|
+
|
|
64
|
+
# Flask stuff:
|
|
65
|
+
instance/
|
|
66
|
+
.webassets-cache
|
|
67
|
+
|
|
68
|
+
# Scrapy stuff:
|
|
69
|
+
.scrapy
|
|
70
|
+
|
|
71
|
+
# Sphinx documentation
|
|
72
|
+
docs/_build/
|
|
73
|
+
|
|
74
|
+
# PyBuilder
|
|
75
|
+
target/
|
|
76
|
+
|
|
77
|
+
# Jupyter Notebook
|
|
78
|
+
.ipynb_checkpoints
|
|
79
|
+
|
|
80
|
+
# IPython
|
|
81
|
+
profile_default/
|
|
82
|
+
ipython_config.py
|
|
83
|
+
|
|
84
|
+
# pyenv
|
|
85
|
+
.python-version
|
|
86
|
+
|
|
87
|
+
# pipenv
|
|
88
|
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
|
89
|
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
|
90
|
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
|
91
|
+
# install all needed dependencies.
|
|
92
|
+
#Pipfile.lock
|
|
93
|
+
|
|
94
|
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
|
95
|
+
__pypackages__/
|
|
96
|
+
|
|
97
|
+
# Celery stuff
|
|
98
|
+
celerybeat-schedule
|
|
99
|
+
celerybeat.pid
|
|
100
|
+
|
|
101
|
+
# SageMath parsed files
|
|
102
|
+
*.sage.py
|
|
103
|
+
|
|
104
|
+
# Environments
|
|
105
|
+
.env
|
|
106
|
+
.venv
|
|
107
|
+
env/
|
|
108
|
+
venv/
|
|
109
|
+
ENV/
|
|
110
|
+
env.bak/
|
|
111
|
+
venv.bak/
|
|
112
|
+
|
|
113
|
+
# Spyder project settings
|
|
114
|
+
.spyderproject
|
|
115
|
+
.spyproject
|
|
116
|
+
|
|
117
|
+
# Rope project settings
|
|
118
|
+
.ropeproject
|
|
119
|
+
|
|
120
|
+
# mkdocs documentation
|
|
121
|
+
/site
|
|
122
|
+
|
|
123
|
+
# mypy
|
|
124
|
+
.mypy_cache/
|
|
125
|
+
.dmypy.json
|
|
126
|
+
dmypy.json
|
|
127
|
+
|
|
128
|
+
# Pyre type checker
|
|
129
|
+
.pyre/
|
|
130
|
+
|
|
131
|
+
# IDE
|
|
132
|
+
.vscode/
|
|
133
|
+
.idea/
|
|
134
|
+
*.swp
|
|
135
|
+
*.swo
|
|
136
|
+
*~
|
|
137
|
+
|
|
138
|
+
# OS
|
|
139
|
+
.DS_Store
|
|
140
|
+
.DS_Store?
|
|
141
|
+
._*
|
|
142
|
+
.Spotlight-V100
|
|
143
|
+
.Trashes
|
|
144
|
+
ehthumbs.db
|
|
145
|
+
Thumbs.db
|
|
146
|
+
|
|
147
|
+
# Logs
|
|
148
|
+
logs/
|
|
149
|
+
*.log
|
|
150
|
+
.logs/
|
|
151
|
+
src/.screenshots/
|
|
152
|
+
yolov5/
|
|
153
|
+
|
|
154
|
+
# Keys and certificates
|
|
155
|
+
*.pem
|
|
156
|
+
logs/gpu_monitor.log
|
|
157
|
+
*.key
|
|
158
|
+
*.crt
|
|
159
|
+
*.p12
|
|
160
|
+
*.pfx
|
|
161
|
+
*.jks
|
|
162
|
+
*.keystore
|
|
163
|
+
|
|
164
|
+
# Secrets and credentials
|
|
165
|
+
.secrets/
|
|
166
|
+
secrets/
|
|
167
|
+
credentials/
|
|
168
|
+
*.secret
|
|
169
|
+
*.credentials
|
|
170
|
+
|
|
171
|
+
# Database
|
|
172
|
+
*.db
|
|
173
|
+
*.sqlite
|
|
174
|
+
*.sqlite3
|
|
175
|
+
|
|
176
|
+
# Temporary files
|
|
177
|
+
*.tmp
|
|
178
|
+
*.temp
|
|
179
|
+
.cache/
|
|
180
|
+
|
|
181
|
+
# OS generated files
|
|
182
|
+
.DS_Store
|
|
183
|
+
.DS_Store?
|
|
184
|
+
._*
|
|
185
|
+
.Spotlight-V100
|
|
186
|
+
.Trashes
|
|
187
|
+
ehthumbs.db
|
|
188
|
+
Thumbs.db
|
|
189
|
+
|
|
190
|
+
# IDE and editor files
|
|
191
|
+
.vscode/settings.json
|
|
192
|
+
.idea/
|
|
193
|
+
*.swp
|
|
194
|
+
*.swo
|
|
195
|
+
*~
|
|
196
|
+
|
|
197
|
+
# Logs and screenshots
|
|
198
|
+
.logs/
|
|
199
|
+
src/.logs/
|
|
200
|
+
src/.screenshots/
|
|
201
|
+
|
|
202
|
+
# Node modules (if any)
|
|
203
|
+
node_modules/
|
|
204
|
+
|
|
205
|
+
# Docker
|
|
206
|
+
.dockerignore
|
|
207
|
+
|
|
208
|
+
# Sensitive documentation
|
|
209
|
+
CREDENTIAL_ROTATION.md
|
|
210
|
+
GITHUB_SECRETS_UPDATE.md
|
|
211
|
+
|
|
212
|
+
# Temporary files and test scripts
|
|
213
|
+
tmp/
|
|
214
|
+
test_script.ps1
|
|
215
|
+
patch_mongo_uri.ps1
|
|
216
|
+
tests/local/
|
|
217
|
+
|
|
218
|
+
# Base64 encoded secrets
|
|
219
|
+
*.b64
|
|
220
|
+
|
|
221
|
+
CUsersUser.kubeconfig
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: oneinfer-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Seamlessly deploy local Docker containers to OneInfer GPU instances over SSH.
|
|
5
|
+
Author-email: OneInfer <hello@oneinfer.com>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Requires-Python: >=3.8
|
|
8
|
+
Requires-Dist: paramiko>=3.0.0
|
|
9
|
+
Requires-Dist: requests>=2.30.0
|
|
10
|
+
Requires-Dist: rich>=13.0.0
|
|
11
|
+
Requires-Dist: typer>=0.9.0
|
|
12
|
+
Description-Content-Type: text/markdown
|
|
13
|
+
|
|
14
|
+
# OneInfer Developer CLI 🚀
|
|
15
|
+
|
|
16
|
+
The official, open-source Developer CLI for seamlessly testing and deploying local Docker containers to OneInfer GPU instances over SSH.
|
|
17
|
+
|
|
18
|
+
> ⚠️ Note: OneInfer GPU instances are secure, unprivileged containers. They do not run a full `dockerd` daemon. Therefore, you cannot simply `docker run` inside them. This CLI handles the complexity of tunneling your image and running it via user-space container runtimes (`udocker`) automatically!
|
|
19
|
+
|
|
20
|
+
## Installation
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
pip install oneinfer-cli
|
|
24
|
+
```
|
|
25
|
+
*(Optionally use `pipx install oneinfer-cli` for global isolation).*
|
|
26
|
+
|
|
27
|
+
## Getting Started
|
|
28
|
+
|
|
29
|
+
1. **Login**: Authenticate your CLI with your OneInfer Account.
|
|
30
|
+
```bash
|
|
31
|
+
oneinfer login
|
|
32
|
+
# Paste your API Token when prompted
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
2. **Build Your AI App**: Build your standard Docker container locally.
|
|
36
|
+
```bash
|
|
37
|
+
docker build -t my-custom-llm:v1 .
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
3. **Deploy to a GPU Instance**: Push and run it directly to your running OneInfer instance.
|
|
41
|
+
```bash
|
|
42
|
+
oneinfer deploy my-custom-llm:v1 --instance-id xyz-1234
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
### What happens under the hood?
|
|
46
|
+
When you run `deploy`, the CLI:
|
|
47
|
+
1. Talks to the OneInfer API to securely fetch your instance's SSH credentials dynamically.
|
|
48
|
+
2. Runs `docker save` locally to bundle your image.
|
|
49
|
+
3. Transmits the bundle over SSH via SCP.
|
|
50
|
+
4. Instructs the remote instance to load and run the container securely in user-space with full GPU passthrough using `udocker run --nv my-custom-llm:v1`.
|
|
51
|
+
|
|
52
|
+
## Contributing
|
|
53
|
+
We welcome PRs! Ensure you have `hatch` installed, and run `pip install -e .` to develop locally.
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
# OneInfer Developer CLI 🚀
|
|
2
|
+
|
|
3
|
+
The official, open-source Developer CLI for seamlessly testing and deploying local Docker containers to OneInfer GPU instances over SSH.
|
|
4
|
+
|
|
5
|
+
> ⚠️ Note: OneInfer GPU instances are secure, unprivileged containers. They do not run a full `dockerd` daemon. Therefore, you cannot simply `docker run` inside them. This CLI handles the complexity of tunneling your image and running it via user-space container runtimes (`udocker`) automatically!
|
|
6
|
+
|
|
7
|
+
## Installation
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
pip install oneinfer-cli
|
|
11
|
+
```
|
|
12
|
+
*(Optionally use `pipx install oneinfer-cli` for global isolation).*
|
|
13
|
+
|
|
14
|
+
## Getting Started
|
|
15
|
+
|
|
16
|
+
1. **Login**: Authenticate your CLI with your OneInfer Account.
|
|
17
|
+
```bash
|
|
18
|
+
oneinfer login
|
|
19
|
+
# Paste your API Token when prompted
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
2. **Build Your AI App**: Build your standard Docker container locally.
|
|
23
|
+
```bash
|
|
24
|
+
docker build -t my-custom-llm:v1 .
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
3. **Deploy to a GPU Instance**: Push and run it directly to your running OneInfer instance.
|
|
28
|
+
```bash
|
|
29
|
+
oneinfer deploy my-custom-llm:v1 --instance-id xyz-1234
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
### What happens under the hood?
|
|
33
|
+
When you run `deploy`, the CLI:
|
|
34
|
+
1. Talks to the OneInfer API to securely fetch your instance's SSH credentials dynamically.
|
|
35
|
+
2. Runs `docker save` locally to bundle your image.
|
|
36
|
+
3. Transmits the bundle over SSH via SCP.
|
|
37
|
+
4. Instructs the remote instance to load and run the container securely in user-space with full GPU passthrough using `udocker run --nv my-custom-llm:v1`.
|
|
38
|
+
|
|
39
|
+
## Contributing
|
|
40
|
+
We welcome PRs! Ensure you have `hatch` installed, and run `pip install -e .` to develop locally.
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "oneinfer-cli"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Seamlessly deploy local Docker containers to OneInfer GPU instances over SSH."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.8"
|
|
11
|
+
license = "MIT"
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "OneInfer", email = "hello@oneinfer.com" }
|
|
14
|
+
]
|
|
15
|
+
dependencies = [
    "typer>=0.9.0",
    "rich>=13.0.0",
    "requests>=2.30.0",
    "paramiko>=3.0.0",
    "scp>=0.14.0"
]
|
|
21
|
+
|
|
22
|
+
[project.scripts]
|
|
23
|
+
oneinfer = "oneinfer_cli.main:app"
|
|
24
|
+
|
|
25
|
+
[tool.hatch.build.targets.wheel]
|
|
26
|
+
packages = ["src/oneinfer_cli"]
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
import requests
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
# Per-user CLI configuration directory; config.json stores the API key
# written by save_token() and read by get_token().
CONFIG_DIR = Path.home() / ".oneinfer"
CONFIG_FILE = CONFIG_DIR / "config.json"
# Base URL for the OneInfer REST API.
API_BASE_URL = "https://api.oneinfer.com/v1" # Production URL or Staging URL
|
|
9
|
+
|
|
10
|
+
def save_token(token: str) -> bool:
    """Persist the API token into ~/.oneinfer/config.json.

    Merges the token into any existing config (preserving other keys) and
    restricts the file to the owner (0o600) so other local users cannot
    read the credential.

    Returns True on success, False on any I/O or JSON error.
    """
    try:
        CONFIG_DIR.mkdir(parents=True, exist_ok=True)
        config = {}
        if CONFIG_FILE.exists():
            with open(CONFIG_FILE, "r") as f:
                config = json.load(f)

        config["api_key"] = token

        # Create the file with owner-only permissions from the start so the
        # token is never world-readable, even briefly. (Writing first and
        # chmod-ing afterwards leaves a race window on first creation.)
        fd = os.open(CONFIG_FILE, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        with os.fdopen(fd, "w") as f:
            json.dump(config, f)

        # os.open's mode argument is ignored for pre-existing files, so
        # enforce the permission explicitly. (Unix only; chmod bits have
        # limited meaning on Windows.)
        if os.name != 'nt':
            os.chmod(CONFIG_FILE, 0o600)

        return True
    except Exception as e:
        print(f"Error saving config: {e}")
        return False
|
|
31
|
+
|
|
32
|
+
def get_token() -> "str | None":
    """Return the stored API key, or None if unavailable.

    Lookup order: the config file written by ``save_token``, then the
    ONEINFER_API_KEY environment variable. A config file that exists but
    lacks an "api_key" entry falls through to the environment variable
    (previously such a file short-circuited the fallback and returned None).
    """
    try:
        if CONFIG_FILE.exists():
            with open(CONFIG_FILE, "r") as f:
                config = json.load(f)
            token = config.get("api_key")
            if token:
                return token
        return os.environ.get("ONEINFER_API_KEY")  # Fallback to ENV Var
    except Exception:
        # Corrupt/unreadable config: treat as "not logged in".
        return None
|
|
41
|
+
|
|
42
|
+
def fetch_instance_ssh_details(token: str, instance_id: str):
    """Hits the /get-instance backend API to retrieve SSH host, port, password."""
    # In a real CLI we might need the provider name, or the backend handles a
    # wildcard ID lookup; here we assume the backend infers the developer
    # from the api_key.
    #
    # The actual /get-instance route requires `developer_id` and
    # `provider_name`, so a dedicated CLI endpoint
    # (e.g. /v1/cli/instance-ssh/{instance_id}) may be cleaner to add later.
    auth_headers = {"Authorization": f"Bearer {token}"}
    try:
        # Real backend call, pending the endpoint above:
        # response = requests.get(f"{API_BASE_URL}/instances/{instance_id}/ssh-details", headers=auth_headers)
        # response.raise_for_status()
        # return response.json()

        # Mocked response until the backend route is wired up precisely.
        details = {
            "ssh_host": "mock.gpu-instance.oneinfer.com",
            "ssh_port": 22000,
            "ssh_password": "mocked-secure-password",
        }
        return details
    except Exception as e:
        print(f"API Error: {e}")
        return None
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import typer
|
|
4
|
+
from rich.console import Console
|
|
5
|
+
from rich.panel import Panel
|
|
6
|
+
from rich.progress import Progress, SpinnerColumn, TextColumn
|
|
7
|
+
|
|
8
|
+
from oneinfer_cli import auth, ssh_utils
|
|
9
|
+
|
|
10
|
+
# Typer application object; exposed as the `oneinfer` console script via
# [project.scripts] in pyproject.toml.
app = typer.Typer(help="OneInfer Developer CLI: Seamlessly push and deploy Docker containers to your GPU.")
# Shared rich console for all user-facing output.
console = Console()
|
|
12
|
+
|
|
13
|
+
@app.command()
def login(api_key: str = typer.Option(..., prompt=True, hide_input=True, help="Your OneInfer API Token")):
    """Authenticate the CLI with your OneInfer account."""
    # Persist the token locally and report the outcome on the shared console.
    saved = auth.save_token(api_key)
    outcome = (
        "[bold green]✅ Login successful![/bold green] Credentials saved."
        if saved
        else "[bold red]❌ Failed to save credentials.[/bold red]"
    )
    console.print(outcome)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@app.command()
def deploy(
    image: str = typer.Argument(..., help="Local Docker image tag (e.g., my-app:latest)"),
    instance_id: str = typer.Option(..., "--instance-id", "-i", help="OneInfer GPU Instance ID")
):
    """Deploy a local Docker container directly to a remote GPU Instance.

    Pipeline: fetch the instance's SSH details from the OneInfer API,
    `docker save` the image to a local tarball, then upload and load it on
    the instance over SSH/SCP (see ssh_utils). Exits with code 1 on any
    failed step.
    """
    # Require a saved API token (or ONEINFER_API_KEY) before doing anything.
    token = auth.get_token()
    if not token:
        console.print("[bold red]❌ You must be logged in. Run `oneinfer login` first.[/bold red]")
        raise typer.Exit(code=1)

    console.print(Panel(f"🚀 Deploying [bold cyan]{image}[/bold cyan] to instance [bold yellow]{instance_id}[/bold yellow]"))

    # transient=False keeps each step's final status line visible after the
    # progress display closes.
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        transient=False,
    ) as progress:

        # 1. Fetch SSH Info
        task_fetch = progress.add_task("Fetching instance credentials...", total=None)
        instance_details = auth.fetch_instance_ssh_details(token, instance_id)
        if not instance_details:
            progress.update(task_fetch, completed=True, description="[red]❌ Failed to get instance details.[/red]")
            raise typer.Exit(code=1)
        progress.update(task_fetch, completed=True, description="[green]✅ Instance reachable.[/green]")

        # 2. Export Docker Image (local `docker save` → tarball path or None)
        task_export = progress.add_task(f"Exporting local image [bold]{image}[/bold]...", total=None)
        tar_path = ssh_utils.export_docker_image(image)
        if not tar_path:
            progress.update(task_export, completed=True, description="[red]❌ Failed to export docker image. Is Docker running?[/red]")
            raise typer.Exit(code=1)
        progress.update(task_export, completed=True, description="[green]✅ Image bundled.[/green]")

        # 3. Transfer & Load (SCP upload + remote `udocker load`)
        task_upload = progress.add_task("Uploading and loading container to GPU instance over SSH...", total=None)
        success = ssh_utils.transfer_and_load_image(
            tar_path=tar_path,
            image_name=image,
            ssh_host=instance_details['ssh_host'],
            ssh_port=instance_details['ssh_port'],
            ssh_password=instance_details['ssh_password']
        )

        if not success:
            progress.update(task_upload, completed=True, description="[red]❌ Deployment failed.[/red]")
            raise typer.Exit(code=1)

        progress.update(task_upload, completed=True, description="[green]✅ Container uploaded and loaded![/green]")

    console.print("\n[bold green]🎉 Deployment Successful![/bold green]")
    console.print(f"To run your app, SSH into your instance and run:\n > [bold cyan]udocker run --nv {image}[/bold cyan]")
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
if __name__ == "__main__":
    # Allow running the module directly, without the installed
    # `oneinfer` entry point.
    app()
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
import subprocess
|
|
2
|
+
import os
|
|
3
|
+
import paramiko
|
|
4
|
+
from scp import SCPClient
|
|
5
|
+
|
|
6
|
+
def export_docker_image(image_name: str, output_tar: str = "image.tar") -> "str | None":
    """Exports a local Docker image to a tarball.

    Runs ``docker save -o <output_tar> <image_name>`` locally.

    Returns:
        The tarball path (``output_tar``) on success, or None when the
        docker CLI is missing or the save fails.
    """
    print(f"Exporting local Docker image '{image_name}'...")
    try:
        # We can pipe through gzip later, but a plain tar is simpler.
        # List-argv form (shell=False) so image_name cannot inject shell syntax.
        result = subprocess.run(
            ["docker", "save", "-o", output_tar, image_name],
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            print(f"Docker save failed: {result.stderr}")
            return None
        return output_tar
    except OSError as e:
        # Typically FileNotFoundError: the docker CLI is not on PATH.
        print(f"Failed to find or run docker: {e}")
        return None
|
|
24
|
+
|
|
25
|
+
def create_ssh_client(server, port, user, password):
    """Open a password-authenticated paramiko SSH connection and return it."""
    # NOTE(review): AutoAddPolicy blindly trusts unknown host keys, which
    # allows man-in-the-middle attacks; consider pinning the instance's host
    # key once the backend can supply it.
    ssh_client = paramiko.SSHClient()
    ssh_client.load_system_host_keys()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect(server, port=port, username=user, password=password)
    return ssh_client
|
|
31
|
+
|
|
32
|
+
def transfer_and_load_image(tar_path: str, image_name: str, ssh_host: str, ssh_port: int, ssh_password: str) -> bool:
    """Uploads the tarball over SSH and loads it using udocker on the remote instance.

    Steps: SCP the tarball to /tmp on the instance, run ``udocker load``
    there, then delete the remote copy; the local tarball is always removed,
    even on failure.

    Returns True on success, False on any SSH/SCP/udocker failure.
    """
    import shlex  # local import: only needed for remote shell quoting

    ssh = None
    # Use only the filename remotely: tar_path may carry local directory
    # components that do not exist under /tmp on the instance.
    remote_path = f"/tmp/{os.path.basename(tar_path)}"
    # Quote for the remote shell so unusual filenames cannot inject commands.
    quoted_remote = shlex.quote(remote_path)
    try:
        ssh = create_ssh_client(ssh_host, ssh_port, "root", ssh_password)

        # 1. SCP Upload
        print(f"Uploading {tar_path} to {ssh_host}:{ssh_port} via SCP...")
        with SCPClient(ssh.get_transport()) as scp:
            scp.put(tar_path, remote_path)

        print("Upload complete. Loading image into remote user-space registry (udocker)...")

        # 2. Remote udocker load. udocker is pre-installed via the OneInfer
        # startup_script injected during create-instance.
        stdin, stdout, stderr = ssh.exec_command(f"udocker load -i {quoted_remote}")

        # Block until the remote command finishes.
        exit_status = stdout.channel.recv_exit_status()
        if exit_status != 0:
            err = stderr.read().decode('utf-8')
            print(f"Remote udocker load failed: {err}")
            return False

        # 3. Clean up the remote tarball (best-effort; result not checked).
        ssh.exec_command(f"rm {quoted_remote}")

        return True
    except Exception as e:
        print(f"SSH Transfer completely failed: {e}")
        return False
    finally:
        if ssh:
            ssh.close()
        # Always remove the local tarball so failed deploys don't leak
        # multi-GB image bundles in the working directory.
        if os.path.exists(tar_path):
            os.remove(tar_path)
|