@magpiecloud/mags 1.8.13 → 1.8.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +95 -378
- package/bin/mags.js +196 -104
- package/index.js +6 -52
- package/package.json +22 -4
- package/API.md +0 -388
- package/Mags-API.postman_collection.json +0 -374
- package/QUICKSTART.md +0 -295
- package/deploy-page.sh +0 -171
- package/mags +0 -0
- package/mags.sh +0 -270
- package/nodejs/README.md +0 -197
- package/nodejs/bin/mags.js +0 -1146
- package/nodejs/index.js +0 -642
- package/nodejs/package.json +0 -42
- package/python/INTEGRATION.md +0 -800
- package/python/README.md +0 -161
- package/python/dist/magpie_mags-1.3.5-py3-none-any.whl +0 -0
- package/python/dist/magpie_mags-1.3.5.tar.gz +0 -0
- package/python/examples/demo.py +0 -181
- package/python/pyproject.toml +0 -39
- package/python/src/magpie_mags.egg-info/PKG-INFO +0 -182
- package/python/src/magpie_mags.egg-info/SOURCES.txt +0 -9
- package/python/src/magpie_mags.egg-info/dependency_links.txt +0 -1
- package/python/src/magpie_mags.egg-info/requires.txt +0 -1
- package/python/src/magpie_mags.egg-info/top_level.txt +0 -1
- package/python/src/mags/__init__.py +0 -6
- package/python/src/mags/client.py +0 -573
- package/python/test_sdk.py +0 -78
- package/skill.md +0 -153
- package/website/api.html +0 -1095
- package/website/claude-skill.html +0 -481
- package/website/cookbook/hn-marketing.html +0 -410
- package/website/cookbook/hn-marketing.sh +0 -42
- package/website/cookbook.html +0 -282
- package/website/env.js +0 -4
- package/website/index.html +0 -801
- package/website/llms.txt +0 -334
- package/website/login.html +0 -108
- package/website/mags.md +0 -210
- package/website/script.js +0 -453
- package/website/styles.css +0 -908
- package/website/tokens.html +0 -169
- package/website/usage.html +0 -185
|
@@ -1,573 +0,0 @@
|
|
|
1
|
-
"""Mags client for interacting with the Magpie VM infrastructure API."""
|
|
2
|
-
|
|
3
|
-
from __future__ import annotations
|
|
4
|
-
|
|
5
|
-
import os
|
|
6
|
-
import subprocess
|
|
7
|
-
import tempfile
|
|
8
|
-
import time
|
|
9
|
-
from pathlib import Path
|
|
10
|
-
from typing import Any, Dict, List, Optional
|
|
11
|
-
|
|
12
|
-
import requests
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
class MagsError(Exception):
|
|
16
|
-
"""Raised when the Mags API returns an error."""
|
|
17
|
-
|
|
18
|
-
def __init__(self, message: str, status_code: int | None = None):
|
|
19
|
-
super().__init__(message)
|
|
20
|
-
self.status_code = status_code
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
class Mags:
    """Client for the Mags API.

    Args:
        api_token: API token. Falls back to ``MAGS_API_TOKEN`` or ``MAGS_TOKEN`` env vars.
        api_url: API base URL. Falls back to ``MAGS_API_URL`` env var or
            ``https://api.magpiecloud.com``.
        timeout: Default request timeout in seconds.
    """

    def __init__(
        self,
        api_token: str | None = None,
        api_url: str | None = None,
        timeout: int = 30,
    ):
        env = os.environ

        # Normalize the base URL: drop any trailing slash so path joins are clean.
        base = api_url or env.get("MAGS_API_URL") or "https://api.magpiecloud.com"
        self.api_url = base.rstrip("/")

        token = api_token or env.get("MAGS_API_TOKEN") or env.get("MAGS_TOKEN")
        if not token:
            raise MagsError(
                "API token required. Set MAGS_API_TOKEN env var or pass api_token."
            )
        self.api_token = token

        self.timeout = timeout

        # One shared session: bearer auth + JSON content type on every call.
        session = requests.Session()
        session.headers["Authorization"] = f"Bearer {token}"
        session.headers["Content-Type"] = "application/json"
        self._session = session
|
|
63
|
-
|
|
64
|
-
# ── helpers ──────────────────────────────────────────────────────
|
|
65
|
-
|
|
66
|
-
def _request(
    self,
    method: str,
    path: str,
    json: Any = None,
    params: dict | None = None,
    timeout: int | None = None,
) -> Any:
    """Send an authenticated request to ``/api/v1{path}`` and decode the reply.

    Raises:
        MagsError: for any 4xx/5xx response, carrying the server's error
            message (or raw text when the body is not JSON) and status code.

    Returns the decoded JSON body, or ``{}`` when the response is empty.
    """
    response = self._session.request(
        method,
        f"{self.api_url}/api/v1{path}",
        json=json,
        params=params,
        # Falsy timeout values fall back to the client default.
        timeout=timeout or self.timeout,
    )

    if response.status_code >= 400:
        # Prefer a structured error message; fall back to the raw body.
        try:
            body = response.json()
            message = body.get("error") or body.get("message") or response.text
        except Exception:
            message = response.text
        raise MagsError(message, status_code=response.status_code)

    return response.json() if response.content else {}
|
|
92
|
-
|
|
93
|
-
# ── jobs ─────────────────────────────────────────────────────────
|
|
94
|
-
|
|
95
|
-
def run(
|
|
96
|
-
self,
|
|
97
|
-
script: str,
|
|
98
|
-
*,
|
|
99
|
-
name: str | None = None,
|
|
100
|
-
workspace_id: str | None = None,
|
|
101
|
-
base_workspace_id: str | None = None,
|
|
102
|
-
persistent: bool = False,
|
|
103
|
-
no_sleep: bool = False,
|
|
104
|
-
ephemeral: bool = False,
|
|
105
|
-
startup_command: str | None = None,
|
|
106
|
-
environment: Dict[str, str] | None = None,
|
|
107
|
-
file_ids: List[str] | None = None,
|
|
108
|
-
disk_gb: int | None = None,
|
|
109
|
-
no_sync: bool = False,
|
|
110
|
-
) -> dict:
|
|
111
|
-
"""Submit a job for execution.
|
|
112
|
-
|
|
113
|
-
Args:
|
|
114
|
-
script: Shell script to execute inside the VM.
|
|
115
|
-
name: Optional job name.
|
|
116
|
-
workspace_id: Persistent workspace name (synced to S3).
|
|
117
|
-
base_workspace_id: Read-only base workspace to mount.
|
|
118
|
-
persistent: Keep VM alive after script finishes.
|
|
119
|
-
no_sleep: Never auto-sleep this VM (requires persistent=True).
|
|
120
|
-
The VM stays running 24/7 and auto-recovers if its host goes down.
|
|
121
|
-
ephemeral: No S3 sync (faster, truly ephemeral).
|
|
122
|
-
startup_command: Command to run when VM wakes from sleep.
|
|
123
|
-
environment: Key-value env vars injected into the VM.
|
|
124
|
-
file_ids: File IDs to download into VM before script runs.
|
|
125
|
-
disk_gb: Custom disk size in GB (default 2GB).
|
|
126
|
-
no_sync: Skip S3 sync, use local disk only.
|
|
127
|
-
|
|
128
|
-
Returns ``{"request_id": ..., "status": "accepted"}``.
|
|
129
|
-
"""
|
|
130
|
-
if ephemeral and workspace_id:
|
|
131
|
-
raise MagsError("Cannot use ephemeral with workspace_id")
|
|
132
|
-
if ephemeral and persistent:
|
|
133
|
-
raise MagsError("Cannot use ephemeral with persistent")
|
|
134
|
-
if no_sleep and not persistent:
|
|
135
|
-
raise MagsError("no_sleep requires persistent=True")
|
|
136
|
-
|
|
137
|
-
payload = {
|
|
138
|
-
"script": script,
|
|
139
|
-
"type": "inline",
|
|
140
|
-
"persistent": persistent,
|
|
141
|
-
}
|
|
142
|
-
if no_sleep:
|
|
143
|
-
payload["no_sleep"] = True
|
|
144
|
-
if no_sync:
|
|
145
|
-
payload["no_sync"] = True
|
|
146
|
-
if name:
|
|
147
|
-
payload["name"] = name
|
|
148
|
-
if not ephemeral and workspace_id:
|
|
149
|
-
payload["workspace_id"] = workspace_id
|
|
150
|
-
if base_workspace_id:
|
|
151
|
-
payload["base_workspace_id"] = base_workspace_id
|
|
152
|
-
if startup_command:
|
|
153
|
-
payload["startup_command"] = startup_command
|
|
154
|
-
if environment:
|
|
155
|
-
payload["environment"] = environment
|
|
156
|
-
if file_ids:
|
|
157
|
-
payload["file_ids"] = file_ids
|
|
158
|
-
if disk_gb:
|
|
159
|
-
payload["disk_gb"] = disk_gb
|
|
160
|
-
|
|
161
|
-
return self._request("POST", "/mags-jobs", json=payload)
|
|
162
|
-
|
|
163
|
-
def run_and_wait(
    self,
    script: str,
    *,
    timeout: float = 60.0,
    poll_interval: float = 1.0,
    **run_kwargs: Any,
) -> dict:
    """Submit a job and block, polling, until it completes or times out.

    Args:
        script: Shell script to execute (forwarded to :meth:`run`).
        timeout: Maximum seconds to wait for completion.
        poll_interval: Seconds between status polls.
        **run_kwargs: Extra keyword arguments forwarded to :meth:`run`.

    Returns a dict with ``request_id``, ``status``, ``exit_code``,
    ``duration_ms``, and ``logs``.

    Raises:
        MagsError: when the job has not finished within ``timeout`` seconds.
    """
    request_id = self.run(script, **run_kwargs)["request_id"]

    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        st = self.status(request_id)
        if st["status"] not in ("completed", "error"):
            time.sleep(poll_interval)
            continue
        return {
            "request_id": request_id,
            "status": st["status"],
            "exit_code": st.get("exit_code", 0),
            "duration_ms": st.get("script_duration_ms", 0),
            "logs": self.logs(request_id).get("logs", []),
        }

    raise MagsError(f"Job {request_id} timed out after {timeout}s")
|
|
194
|
-
|
|
195
|
-
def status(self, request_id: str) -> dict:
    """Fetch the current status record for a job."""
    return self._request("GET", f"/mags-jobs/{request_id}/status")


def logs(self, request_id: str) -> dict:
    """Fetch a job's log lines; returns ``{"logs": [...]}``."""
    return self._request("GET", f"/mags-jobs/{request_id}/logs")


def list_jobs(self, *, page: int = 1, page_size: int = 20) -> dict:
    """Page through recent jobs; returns ``{"jobs": [...], "total": N, ...}``."""
    query = {"page": page, "page_size": page_size}
    return self._request("GET", "/mags-jobs", params=query)
|
|
208
|
-
|
|
209
|
-
def update_job(
|
|
210
|
-
self,
|
|
211
|
-
request_id: str,
|
|
212
|
-
*,
|
|
213
|
-
startup_command: str | None = None,
|
|
214
|
-
no_sleep: bool | None = None,
|
|
215
|
-
) -> dict:
|
|
216
|
-
"""Update a job's settings.
|
|
217
|
-
|
|
218
|
-
Args:
|
|
219
|
-
request_id: The job/workspace ID to update.
|
|
220
|
-
startup_command: Command to run when VM wakes from sleep.
|
|
221
|
-
no_sleep: If True, VM never auto-sleeps. If False, re-enables auto-sleep.
|
|
222
|
-
"""
|
|
223
|
-
payload: dict = {}
|
|
224
|
-
if startup_command is not None:
|
|
225
|
-
payload["startup_command"] = startup_command
|
|
226
|
-
if no_sleep is not None:
|
|
227
|
-
payload["no_sleep"] = no_sleep
|
|
228
|
-
return self._request("PATCH", f"/mags-jobs/{request_id}", json=payload)
|
|
229
|
-
|
|
230
|
-
def enable_access(self, request_id: str, *, port: int = 8080) -> dict:
    """Enable external access (URL or SSH) for a persistent job's VM.

    Use ``port=22`` for SSH access, or ``port=8080`` (default) for HTTP/URL access.
    """
    body = {"port": port}
    return self._request("POST", f"/mags-jobs/{request_id}/access", json=body)
|
|
238
|
-
|
|
239
|
-
def stop(self, name_or_id: str) -> dict:
    """Stop a running job, addressed by job ID, job name, or workspace ID."""
    rid = self._resolve_job_id(name_or_id)
    return self._request("POST", f"/mags-jobs/{rid}/stop")
|
|
246
|
-
|
|
247
|
-
def resize(
    self,
    workspace: str,
    disk_gb: int,
    *,
    timeout: float = 30.0,
    poll_interval: float = 1.0,
) -> dict:
    """Resize a workspace's disk by recycling its VM.

    A running VM is synced to S3 first, then stopped; a sleeping VM is
    simply stopped.  A fresh persistent VM is then created with the new
    disk size.  Workspace files are preserved in S3.

    Returns ``{"request_id": ..., "status": "running"}``.
    """
    current = self.find_job(workspace)
    if current:
        state = current.get("status")
        rid = current["request_id"]
        if state == "running":
            # Flush the live filesystem to S3 before tearing the VM down.
            self._request("POST", f"/mags-jobs/{rid}/sync")
            self._request("POST", f"/mags-jobs/{rid}/stop")
            time.sleep(1)
        elif state == "sleeping":
            self._request("POST", f"/mags-jobs/{rid}/stop")
            time.sleep(1)

    return self.new(workspace, persistent=True, disk_gb=disk_gb,
                    timeout=timeout, poll_interval=poll_interval)
|
|
271
|
-
|
|
272
|
-
def new(
|
|
273
|
-
self,
|
|
274
|
-
name: str,
|
|
275
|
-
*,
|
|
276
|
-
persistent: bool = False,
|
|
277
|
-
base_workspace_id: str | None = None,
|
|
278
|
-
disk_gb: int | None = None,
|
|
279
|
-
timeout: float = 30.0,
|
|
280
|
-
poll_interval: float = 1.0,
|
|
281
|
-
) -> dict:
|
|
282
|
-
"""Create a new VM sandbox and wait until it's running.
|
|
283
|
-
|
|
284
|
-
By default, data lives on local disk only (no S3 sync).
|
|
285
|
-
Pass ``persistent=True`` to enable S3 data persistence.
|
|
286
|
-
|
|
287
|
-
Equivalent to ``mags new <name>`` or ``mags new <name> -p``.
|
|
288
|
-
|
|
289
|
-
Returns ``{"request_id": ..., "status": "running"}``.
|
|
290
|
-
"""
|
|
291
|
-
result = self.run(
|
|
292
|
-
"sleep infinity",
|
|
293
|
-
workspace_id=name,
|
|
294
|
-
persistent=True,
|
|
295
|
-
no_sync=not persistent,
|
|
296
|
-
base_workspace_id=base_workspace_id,
|
|
297
|
-
disk_gb=disk_gb,
|
|
298
|
-
)
|
|
299
|
-
request_id = result["request_id"]
|
|
300
|
-
|
|
301
|
-
deadline = time.monotonic() + timeout
|
|
302
|
-
while time.monotonic() < deadline:
|
|
303
|
-
st = self.status(request_id)
|
|
304
|
-
if st["status"] == "running" and st.get("vm_id"):
|
|
305
|
-
return {"request_id": request_id, "status": "running"}
|
|
306
|
-
if st["status"] in ("completed", "error"):
|
|
307
|
-
raise MagsError(f"Job {request_id} ended unexpectedly: {st['status']}")
|
|
308
|
-
time.sleep(poll_interval)
|
|
309
|
-
|
|
310
|
-
raise MagsError(f"Job {request_id} did not start within {timeout}s")
|
|
311
|
-
|
|
312
|
-
def find_job(self, name_or_id: str) -> dict | None:
|
|
313
|
-
"""Find a running or sleeping job by name, workspace ID, or job ID.
|
|
314
|
-
|
|
315
|
-
Uses the same resolution priority as the CLI:
|
|
316
|
-
running/sleeping exact name → workspace ID → any status exact name.
|
|
317
|
-
Returns the job dict or ``None``.
|
|
318
|
-
"""
|
|
319
|
-
jobs = self.list_jobs(page_size=50).get("jobs", [])
|
|
320
|
-
|
|
321
|
-
# Priority 1: exact name match, running/sleeping
|
|
322
|
-
for j in jobs:
|
|
323
|
-
if j.get("name") == name_or_id and j.get("status") in ("running", "sleeping"):
|
|
324
|
-
return j
|
|
325
|
-
|
|
326
|
-
# Priority 2: workspace_id match, running/sleeping
|
|
327
|
-
for j in jobs:
|
|
328
|
-
if j.get("workspace_id") == name_or_id and j.get("status") in ("running", "sleeping"):
|
|
329
|
-
return j
|
|
330
|
-
|
|
331
|
-
# Priority 3: exact name match, any status
|
|
332
|
-
for j in jobs:
|
|
333
|
-
if j.get("name") == name_or_id:
|
|
334
|
-
return j
|
|
335
|
-
|
|
336
|
-
# Priority 4: workspace_id match, any status
|
|
337
|
-
for j in jobs:
|
|
338
|
-
if j.get("workspace_id") == name_or_id:
|
|
339
|
-
return j
|
|
340
|
-
|
|
341
|
-
return None
|
|
342
|
-
|
|
343
|
-
def url(self, name_or_id: str, *, port: int = 8080) -> dict:
    """Enable public URL access for a job's VM.

    Accepts a job ID, job name, or workspace ID.
    Returns dict with ``url`` and access details.
    """
    request_id = self._resolve_job_id(name_or_id)
    st = self.status(request_id)
    access = self.enable_access(request_id, port=port)
    # The subdomain may live on either the status or the access response.
    sub = st.get("subdomain") or access.get("subdomain")
    if sub:
        access["url"] = f"https://{sub}.apps.magpiecloud.com"
    return access
|
|
356
|
-
|
|
357
|
-
def exec(self, name_or_id: str, command: str, *, timeout: int = 30) -> dict:
    """Run a shell command on an existing running/sleeping sandbox over SSH.

    Equivalent to ``mags exec <workspace> '<command>'``.

    Returns ``{"exit_code": int, "output": str, "stderr": str}``.

    Raises:
        MagsError: if no suitable VM is found, SSH access cannot be
            enabled, or the command exceeds ``timeout`` seconds.
    """
    job = self.find_job(name_or_id)
    if not job:
        raise MagsError(f"No running or sleeping VM found for '{name_or_id}'")
    if job["status"] not in ("running", "sleeping"):
        raise MagsError(
            f"VM for '{name_or_id}' is {job['status']}, needs to be running or sleeping"
        )

    request_id = job.get("request_id") or job.get("id")

    # Requesting port-22 access also wakes a sleeping VM.
    access = self.enable_access(request_id, port=22)
    if not access.get("success") or not access.get("ssh_host"):
        raise MagsError(
            f"Failed to enable SSH access: {access.get('error', 'unknown error')}"
        )

    host = access["ssh_host"]
    port = str(access["ssh_port"])
    private_key = access.get("ssh_private_key", "")

    # Mirror the CLI: run inside the chroot overlay when one exists.
    escaped = command.replace("'", "'\\''")
    remote_cmd = (
        f"if [ -d /overlay/bin ]; then "
        f"chroot /overlay /bin/sh -l -c 'cd /root 2>/dev/null; {escaped}'; "
        f"else cd /root 2>/dev/null; {escaped}; fi"
    )

    key_path = None
    try:
        args = [
            "ssh",
            "-o", "StrictHostKeyChecking=no",
            "-o", "UserKnownHostsFile=/dev/null",
            "-o", "LogLevel=ERROR",
            "-p", port,
        ]
        if private_key:
            # Write the ephemeral key to a 0600 temp file for ssh -i.
            fd, key_path = tempfile.mkstemp(prefix="mags_ssh_")
            os.write(fd, private_key.encode())
            os.close(fd)
            os.chmod(key_path, 0o600)
            args += ["-i", key_path]
        args += [f"root@{host}", remote_cmd]

        completed = subprocess.run(
            args,
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        return {
            "exit_code": completed.returncode,
            "output": completed.stdout,
            "stderr": completed.stderr,
        }
    except subprocess.TimeoutExpired:
        raise MagsError(f"Command timed out after {timeout}s")
    finally:
        # Best-effort cleanup of the temporary key material.
        if key_path:
            try:
                os.unlink(key_path)
            except OSError:
                pass
|
|
432
|
-
|
|
433
|
-
def usage(self, *, window_days: int = 30) -> dict:
    """Return the aggregated usage summary for the trailing window."""
    query = {"window_days": window_days}
    return self._request("GET", "/mags-jobs/usage", params=query)
|
|
438
|
-
|
|
439
|
-
# ── internal helpers ─────────────────────────────────────────────
|
|
440
|
-
|
|
441
|
-
def _resolve_job_id(self, name_or_id: str) -> str:
    """Map a job name, workspace ID, or UUID to a concrete request_id.

    Raises:
        MagsError: when no job matches the given identifier.
    """
    # Anything UUID-shaped is assumed to already be a request_id.
    looks_like_uuid = len(name_or_id) >= 32 and "-" in name_or_id
    if looks_like_uuid:
        return name_or_id
    match = self.find_job(name_or_id)
    if not match:
        raise MagsError(f"No job found for '{name_or_id}'")
    return match.get("request_id") or match.get("id")
|
|
450
|
-
|
|
451
|
-
# ── file uploads ─────────────────────────────────────────────────
|
|
452
|
-
|
|
453
|
-
def upload_file(self, file_path: str) -> str:
    """Upload one local file; returns the server-assigned file ID.

    Raises:
        MagsError: if the path does not exist, the upload is rejected,
            or the response carries no ``file_id``.
    """
    path = Path(file_path)
    if not path.exists():
        raise MagsError(f"File not found: {file_path}")

    # Multipart upload: bypass the shared session so its JSON
    # content-type header does not clobber the multipart boundary.
    endpoint = f"{self.api_url}/api/v1/mags-files"
    resp = requests.post(
        endpoint,
        files={"file": (path.name, path.read_bytes(), "application/octet-stream")},
        headers={"Authorization": f"Bearer {self.api_token}"},
        timeout=self.timeout,
    )
    if resp.status_code >= 400:
        raise MagsError(resp.text, status_code=resp.status_code)

    body = resp.json()
    file_id = body.get("file_id")
    if not file_id:
        raise MagsError(f"Upload failed for {path.name}: {body}")
    return file_id
|
|
474
|
-
|
|
475
|
-
def upload_files(self, file_paths: List[str]) -> List[str]:
    """Upload several files in order; returns their file IDs."""
    ids: List[str] = []
    for fp in file_paths:
        ids.append(self.upload_file(fp))
    return ids
|
|
478
|
-
|
|
479
|
-
# ── workspaces ────────────────────────────────────────────────────
|
|
480
|
-
|
|
481
|
-
def list_workspaces(self) -> dict:
    """List every workspace; returns ``{"workspaces": [...], "total": N}``."""
    return self._request("GET", "/mags-workspaces")


def delete_workspace(self, workspace_id: str) -> dict:
    """Permanently delete a workspace's stored filesystem from S3.

    Active jobs using the workspace must be stopped first.
    """
    return self._request("DELETE", f"/mags-workspaces/{workspace_id}")


def sync(self, request_id: str) -> dict:
    """Flush a running job's workspace to S3 without stopping the VM.

    Useful to persist changes immediately, e.g. after setting up a
    base image.
    """
    return self._request("POST", f"/mags-jobs/{request_id}/sync")
|
|
500
|
-
|
|
501
|
-
# ── cron jobs ────────────────────────────────────────────────────
|
|
502
|
-
|
|
503
|
-
def cron_create(
|
|
504
|
-
self,
|
|
505
|
-
*,
|
|
506
|
-
name: str,
|
|
507
|
-
cron_expression: str,
|
|
508
|
-
script: str,
|
|
509
|
-
workspace_id: str | None = None,
|
|
510
|
-
environment: Dict[str, str] | None = None,
|
|
511
|
-
persistent: bool = False,
|
|
512
|
-
) -> dict:
|
|
513
|
-
"""Create a scheduled cron job."""
|
|
514
|
-
payload = {
|
|
515
|
-
"name": name,
|
|
516
|
-
"cron_expression": cron_expression,
|
|
517
|
-
"script": script,
|
|
518
|
-
"persistent": persistent,
|
|
519
|
-
}
|
|
520
|
-
if workspace_id:
|
|
521
|
-
payload["workspace_id"] = workspace_id
|
|
522
|
-
if environment:
|
|
523
|
-
payload["environment"] = environment
|
|
524
|
-
return self._request("POST", "/mags-cron", json=payload)
|
|
525
|
-
|
|
526
|
-
def cron_list(self) -> dict:
    """List every cron job."""
    return self._request("GET", "/mags-cron")


def cron_get(self, cron_id: str) -> dict:
    """Fetch a single cron job by ID."""
    return self._request("GET", f"/mags-cron/{cron_id}")


def cron_update(self, cron_id: str, **updates: Any) -> dict:
    """Patch a cron job; fields are passed as keyword arguments."""
    return self._request("PATCH", f"/mags-cron/{cron_id}", json=updates)


def cron_delete(self, cron_id: str) -> dict:
    """Remove a cron job."""
    return self._request("DELETE", f"/mags-cron/{cron_id}")
|
|
541
|
-
|
|
542
|
-
# ── URL aliases ──────────────────────────────────────────────────
|
|
543
|
-
|
|
544
|
-
def url_alias_create(
    self,
    subdomain: str,
    workspace_id: str,
    domain: str = "apps.magpiecloud.com",
) -> dict:
    """Create a stable URL alias for a workspace.

    The alias maps ``subdomain.<domain>`` to the active job in the workspace.
    Use ``domain="app.lfg.run"`` for the LFG domain.

    Returns ``{"id": ..., "subdomain": ..., "url": ...}``.
    """
    body = {
        "subdomain": subdomain,
        "workspace_id": workspace_id,
        "domain": domain,
    }
    return self._request("POST", "/mags-url-aliases", json=body)
|
|
566
|
-
|
|
567
|
-
def url_alias_list(self) -> dict:
    """List all URL aliases; returns ``{"aliases": [...], "total": N}``."""
    return self._request("GET", "/mags-url-aliases")


def url_alias_delete(self, subdomain: str) -> dict:
    """Delete a URL alias by subdomain."""
    return self._request("DELETE", f"/mags-url-aliases/{subdomain}")
|
package/python/test_sdk.py
DELETED
|
@@ -1,78 +0,0 @@
|
|
|
1
|
-
"""Test the Mags Python SDK: new, exec, url."""
|
|
2
|
-
|
|
3
|
-
import os
|
|
4
|
-
import sys
|
|
5
|
-
import time
|
|
6
|
-
|
|
7
|
-
os.environ["MAGS_API_TOKEN"] = "da214df72c164bda47970491fd839247a864c2599305ce90b38512d43ed034ea"
|
|
8
|
-
|
|
9
|
-
# Use local src/ instead of installed package
|
|
10
|
-
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
|
|
11
|
-
|
|
12
|
-
from mags import Mags
|
|
13
|
-
|
|
14
|
-
m = Mags()
|
|
15
|
-
WS = "sdk-test-" + str(int(time.time()))
|
|
16
|
-
|
|
17
|
-
print(f"=== Test 1: Create new VM ({WS}) ===")
|
|
18
|
-
result = m.new(WS)
|
|
19
|
-
print(f" request_id: {result['request_id']}")
|
|
20
|
-
print(f" status: {result['status']}")
|
|
21
|
-
assert result["status"] == "running"
|
|
22
|
-
print(" PASS\n")
|
|
23
|
-
|
|
24
|
-
print("=== Test 2: find_job ===")
|
|
25
|
-
job = m.find_job(WS)
|
|
26
|
-
print(f" found: workspace={job.get('workspace_id')} status={job.get('status')}")
|
|
27
|
-
assert job is not None
|
|
28
|
-
assert job["status"] == "running"
|
|
29
|
-
print(" PASS\n")
|
|
30
|
-
|
|
31
|
-
print("=== Test 3: exec - simple command ===")
|
|
32
|
-
try:
|
|
33
|
-
result = m.exec(WS, "echo HELLO_FROM_SDK && uname -a", timeout=15)
|
|
34
|
-
print(f" exit_code: {result['exit_code']}")
|
|
35
|
-
print(f" output: {result['output'].strip()}")
|
|
36
|
-
if result["stderr"]:
|
|
37
|
-
print(f" stderr: {result['stderr'].strip()}")
|
|
38
|
-
assert "HELLO_FROM_SDK" in result["output"]
|
|
39
|
-
print(" PASS\n")
|
|
40
|
-
except Exception as e:
|
|
41
|
-
print(f" FAIL: {e}\n")
|
|
42
|
-
|
|
43
|
-
print("=== Test 4: exec - create HTML file ===")
|
|
44
|
-
html = "<html><body><h1>Mags SDK Test</h1><p>Served from a microVM!</p></body></html>"
|
|
45
|
-
try:
|
|
46
|
-
result = m.exec(WS, f"echo '{html}' > /root/index.html", timeout=15)
|
|
47
|
-
print(f" exit_code: {result['exit_code']}")
|
|
48
|
-
result = m.exec(WS, "cat /root/index.html", timeout=15)
|
|
49
|
-
print(f" content: {result['output'].strip()[:80]}...")
|
|
50
|
-
print(" PASS\n")
|
|
51
|
-
except Exception as e:
|
|
52
|
-
print(f" FAIL: {e}\n")
|
|
53
|
-
|
|
54
|
-
print("=== Test 5: exec - start python HTTP server ===")
|
|
55
|
-
try:
|
|
56
|
-
result = m.exec(WS, "nohup python3 -m http.server 8080 --directory /root > /tmp/srv.log 2>&1 & echo PID=$!", timeout=15)
|
|
57
|
-
print(f" output: {result['output'].strip()}")
|
|
58
|
-
time.sleep(1)
|
|
59
|
-
# Verify it's running
|
|
60
|
-
result = m.exec(WS, "curl -s http://localhost:8080/index.html | head -1", timeout=15)
|
|
61
|
-
print(f" curl: {result['output'].strip()[:80]}")
|
|
62
|
-
print(" PASS\n")
|
|
63
|
-
except Exception as e:
|
|
64
|
-
print(f" FAIL: {e}\n")
|
|
65
|
-
|
|
66
|
-
print("=== Test 6: url - enable public URL ===")
|
|
67
|
-
try:
|
|
68
|
-
info = m.url(WS, port=8080)
|
|
69
|
-
print(f" success: {info.get('success')}")
|
|
70
|
-
print(f" url: {info.get('url', 'N/A')}")
|
|
71
|
-
if info.get("url"):
|
|
72
|
-
print(f"\n >>> Visit: {info['url']} <<<")
|
|
73
|
-
print(" PASS\n")
|
|
74
|
-
except Exception as e:
|
|
75
|
-
print(f" FAIL: {e}\n")
|
|
76
|
-
|
|
77
|
-
print("=== All tests complete ===")
|
|
78
|
-
print(f"Workspace: {WS}")
|