@magpiecloud/mags 1.7.0 → 1.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/mags.js CHANGED
@@ -202,6 +202,7 @@ ${colors.bold}Commands:${colors.reset}
202
202
  new <name> Create a new persistent VM (returns ID only)
203
203
  run [options] <script> Execute a script on a microVM
204
204
  ssh <workspace|name|id> Open SSH session (auto-starts VM if needed)
205
+ exec <workspace> <command> Run a command on an existing VM
205
206
  status <name|id> Get job status
206
207
  logs <name|id> Get job logs
207
208
  list List recent jobs
@@ -240,6 +241,7 @@ ${colors.bold}Examples:${colors.reset}
240
241
  mags login
241
242
  mags new myvm # Create VM, get ID
242
243
  mags ssh myvm # SSH (auto-starts if needed)
244
+ mags exec myvm 'ls -la' # Run command on existing VM
243
245
  mags run 'echo Hello World'
244
246
  mags run -e 'echo fast' # Ephemeral (no S3 sync)
245
247
  mags run -f script.py 'python3 script.py' # Upload + run file
@@ -389,10 +391,21 @@ To use Mags, you need to authenticate first.
389
391
  }
390
392
 
391
393
  // Create a new persistent VM
392
- async function newVM(name) {
394
+ async function newVM(args) {
395
+ let name = null;
396
+ let baseWorkspace = null;
397
+
398
+ for (let i = 0; i < args.length; i++) {
399
+ if (args[i] === '--base' && args[i + 1]) {
400
+ baseWorkspace = args[++i];
401
+ } else if (!name) {
402
+ name = args[i];
403
+ }
404
+ }
405
+
393
406
  if (!name) {
394
407
  log('red', 'Error: Name required');
395
- console.log(`\nUsage: mags new <name>\n`);
408
+ console.log(`\nUsage: mags new <name> [--base <workspace>]\n`);
396
409
  process.exit(1);
397
410
  }
398
411
 
@@ -404,6 +417,7 @@ async function newVM(name) {
404
417
  workspace_id: name,
405
418
  startup_command: 'sleep infinity'
406
419
  };
420
+ if (baseWorkspace) payload.base_workspace_id = baseWorkspace;
407
421
 
408
422
  const response = await request('POST', '/api/v1/mags-jobs', payload);
409
423
 
@@ -1229,6 +1243,75 @@ async function sshToJob(nameOrId) {
1229
1243
  });
1230
1244
  }
1231
1245
 
1246
+ async function execOnJob(nameOrId, command) {
1247
+ if (!nameOrId || !command) {
1248
+ log('red', 'Error: Workspace and command required');
1249
+ console.log(`\nUsage: mags exec <workspace|name|id> <command>\n`);
1250
+ console.log('Examples:');
1251
+ console.log(' mags exec myproject "ls -la"');
1252
+ console.log(' mags exec myproject "node --version"');
1253
+ process.exit(1);
1254
+ }
1255
+
1256
+ // Find a running/sleeping job for this workspace
1257
+ const existingJob = await findWorkspaceJob(nameOrId);
1258
+
1259
+ let jobID;
1260
+ if (existingJob && existingJob.status === 'running') {
1261
+ log('green', `Found running VM for '${nameOrId}'`);
1262
+ jobID = existingJob.request_id;
1263
+ } else if (existingJob && existingJob.status === 'sleeping') {
1264
+ log('yellow', `Waking sleeping VM for '${nameOrId}'...`);
1265
+ jobID = existingJob.request_id;
1266
+ } else {
1267
+ log('red', `No running or sleeping VM found for '${nameOrId}'`);
1268
+ log('gray', `Start one with: mags new ${nameOrId}`);
1269
+ process.exit(1);
1270
+ }
1271
+
1272
+ // Enable SSH access
1273
+ log('blue', 'Enabling SSH access...');
1274
+ const accessResp = await request('POST', `/api/v1/mags-jobs/${jobID}/access`, { port: 22 });
1275
+
1276
+ if (!accessResp.success || !accessResp.ssh_host || !accessResp.ssh_port) {
1277
+ log('red', 'Failed to enable SSH access');
1278
+ if (accessResp.error) log('red', accessResp.error);
1279
+ process.exit(1);
1280
+ }
1281
+
1282
+ // Write SSH key to temp file
1283
+ let keyFile = null;
1284
+ const sshArgs = [
1285
+ '-o', 'StrictHostKeyChecking=no',
1286
+ '-o', 'UserKnownHostsFile=/dev/null',
1287
+ '-o', 'LogLevel=ERROR',
1288
+ '-p', accessResp.ssh_port.toString()
1289
+ ];
1290
+
1291
+ if (accessResp.ssh_private_key) {
1292
+ keyFile = path.join(os.tmpdir(), `mags_ssh_${Date.now()}`);
1293
+ fs.writeFileSync(keyFile, accessResp.ssh_private_key, { mode: 0o600 });
1294
+ sshArgs.push('-i', keyFile);
1295
+ }
1296
+
1297
+ // Wrap command to use chroot if overlay is mounted
1298
+ const wrappedCmd = `if [ -d /overlay/bin ]; then chroot /overlay /bin/sh -c '${command.replace(/'/g, "'\\''")}'; else /bin/sh -c '${command.replace(/'/g, "'\\''")}'; fi`;
1299
+ sshArgs.push(`root@${accessResp.ssh_host}`, wrappedCmd);
1300
+
1301
+ const ssh = spawn('ssh', sshArgs, { stdio: 'inherit' });
1302
+
1303
+ ssh.on('error', (err) => {
1304
+ if (keyFile) try { fs.unlinkSync(keyFile); } catch (e) {}
1305
+ log('red', `SSH error: ${err.message}`);
1306
+ process.exit(1);
1307
+ });
1308
+
1309
+ ssh.on('close', (code) => {
1310
+ if (keyFile) try { fs.unlinkSync(keyFile); } catch (e) {}
1311
+ process.exit(code || 0);
1312
+ });
1313
+ }
1314
+
1232
1315
  async function main() {
1233
1316
  const args = process.argv.slice(2);
1234
1317
  const command = args[0];
@@ -1255,7 +1338,7 @@ async function main() {
1255
1338
  break;
1256
1339
  case 'new':
1257
1340
  await requireAuth();
1258
- await newVM(args[1]);
1341
+ await newVM(args.slice(1));
1259
1342
  break;
1260
1343
  case 'run':
1261
1344
  await requireAuth();
@@ -1265,6 +1348,10 @@ async function main() {
1265
1348
  await requireAuth();
1266
1349
  await sshToJob(args[1]);
1267
1350
  break;
1351
+ case 'exec':
1352
+ await requireAuth();
1353
+ await execOnJob(args[1], args.slice(2).join(' '));
1354
+ break;
1268
1355
  case 'url':
1269
1356
  await requireAuth();
1270
1357
  await enableUrlAccess(args[1], parseInt(args[2]) || 8080);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@magpiecloud/mags",
3
- "version": "1.7.0",
3
+ "version": "1.7.2",
4
4
  "description": "Mags CLI - Execute scripts on Magpie's instant VM infrastructure",
5
5
  "main": "index.js",
6
6
  "bin": {
@@ -116,20 +116,39 @@ result = run_with_packages(
116
116
 
117
117
  ### With a pre-built base image
118
118
 
119
- For repeated runs, avoid re-installing packages every time by using a base workspace:
119
+ For repeated runs, avoid re-installing packages every time by creating a base workspace and syncing it:
120
120
 
121
121
  ```python
122
122
  # One-time setup: create a base workspace with common packages
123
- m.run_and_wait(
123
+ job = m.run(
124
124
  "pip install pandas numpy requests flask scikit-learn",
125
125
  workspace_id="python-base",
126
+ persistent=True,
126
127
  )
127
128
 
129
+ # Wait for the job status to reach "running" (VM up), then sync to S3
130
+ import time
131
+ for _ in range(60):
132
+ status = m.status(job["request_id"])
133
+ if status["status"] == "running":
134
+ break
135
+ time.sleep(1)
136
+
137
+ # Force sync — persists everything to S3 immediately
138
+ m.sync(job["request_id"])
139
+
128
140
  # Every subsequent run inherits the base (read-only, no install needed)
129
141
  result = m.run_and_wait(
130
142
  "python3 -c 'import pandas; print(pandas.__version__)'",
131
143
  base_workspace_id="python-base",
132
144
  )
145
+
146
+ # Fork: load base, save changes to a new workspace
147
+ result = m.run_and_wait(
148
+ "pip install torch",
149
+ base_workspace_id="python-base",
150
+ workspace_id="python-ml",
151
+ )
133
152
  ```
134
153
 
135
154
  ---
@@ -259,7 +278,7 @@ def run_data_pipeline(sql_query, workspace_id="etl-pipeline"):
259
278
  python3 << 'PYEOF'
260
279
  import sqlite3, json
261
280
 
262
- conn = sqlite3.connect("/workspace/data.db")
281
+ conn = sqlite3.connect("/root/data.db")
263
282
  cursor = conn.execute("{sql_query}")
264
283
  rows = cursor.fetchall()
265
284
  print(json.dumps(rows))
@@ -430,7 +449,7 @@ cron = m.cron_create(
430
449
  m.cron_create(
431
450
  name="db-backup",
432
451
  cron_expression="0 2 * * *",
433
- script="pg_dump $DATABASE_URL | gzip > /workspace/backup-$(date +%F).sql.gz",
452
+ script="pg_dump $DATABASE_URL | gzip > /root/backup-$(date +%F).sql.gz",
434
453
  workspace_id="backups",
435
454
  )
436
455
 
@@ -462,17 +481,17 @@ def deploy_preview(user_id, html_content):
462
481
  import shlex
463
482
 
464
483
  script = f"""
465
- mkdir -p /workspace/site
466
- cat > /workspace/site/index.html << 'HTMLEOF'
484
+ mkdir -p /root/site
485
+ cat > /root/site/index.html << 'HTMLEOF'
467
486
  {html_content}
468
487
  HTMLEOF
469
- cd /workspace/site && python3 -m http.server 8080
488
+ cd /root/site && python3 -m http.server 8080
470
489
  """
471
490
  job = m.run(
472
491
  script,
473
492
  workspace_id=f"preview-{user_id}",
474
493
  persistent=True,
475
- startup_command="cd /workspace/site && python3 -m http.server 8080",
494
+ startup_command="cd /root/site && python3 -m http.server 8080",
476
495
  )
477
496
 
478
497
  # Wait for VM to start
@@ -528,6 +547,40 @@ m.run_and_wait("echo 'no persistence'", ephemeral=True)
528
547
  | Read-only base | omit | `"my-base"` | Base mounted read-only. Changes discarded. |
529
548
  | Fork | `"fork-1"` | `"my-base"` | Starts from base, saves to `fork-1`. |
530
549
 
550
+ ### Syncing workspaces
551
+
552
+ Workspaces sync to S3 automatically when a job completes. For persistent VMs (`persistent=True`), workspaces also sync every 30 seconds and on sleep.
553
+
554
+ Use `m.sync()` to force an immediate sync without stopping the VM — useful for persisting a base image you've just set up:
555
+
556
+ ```python
557
+ # Set up a base workspace on a persistent VM
558
+ job = m.run(
559
+ "pip install pandas numpy scikit-learn",
560
+ workspace_id="ml-base",
561
+ persistent=True,
562
+ )
563
+
564
+ # Wait for the install to finish
565
+ import time
566
+ for _ in range(60):
567
+ status = m.status(job["request_id"])
568
+ if status["status"] == "running":
569
+ break
570
+ time.sleep(1)
571
+
572
+ # Force sync — base image is now available for other jobs
573
+ m.sync(job["request_id"])
574
+
575
+ # List and manage workspaces
576
+ workspaces = m.list_workspaces()
577
+ for ws in workspaces.get("workspaces", []):
578
+ print(f"{ws['workspace_id']} — {ws['job_count']} jobs")
579
+
580
+ # Delete a workspace (removes stored data from S3)
581
+ m.delete_workspace("old-workspace")
582
+ ```
583
+
531
584
  ---
532
585
 
533
586
  ## File Uploads
@@ -241,6 +241,14 @@ class Mags:
241
241
  """
242
242
  return self._request("DELETE", f"/mags-workspaces/{workspace_id}")
243
243
 
244
+ def sync(self, request_id: str) -> dict:
245
+ """Sync a running job's workspace to S3 without stopping the VM.
246
+
247
+ Use this to persist workspace changes immediately, e.g. after
248
+ setting up a base image.
249
+ """
250
+ return self._request("POST", f"/mags-jobs/{request_id}/sync")
251
+
244
252
  # ── cron jobs ────────────────────────────────────────────────────
245
253
 
246
254
  def cron_create(
@@ -203,6 +203,7 @@ mags login</code></pre>
203
203
  <tr><td><code>mags status &lt;id&gt;</code></td><td>Get job status</td></tr>
204
204
  <tr><td><code>mags logs &lt;id&gt;</code></td><td>Get job output</td></tr>
205
205
  <tr><td><code>mags stop &lt;id&gt;</code></td><td>Stop a running job</td></tr>
206
+ <tr><td><code>mags sync &lt;workspace&gt;</code></td><td>Sync workspace to S3 (without stopping VM)</td></tr>
206
207
  <tr><td><code>mags url &lt;id&gt; [port]</code></td><td>Enable public URL access</td></tr>
207
208
  <tr><td><code>mags workspace list</code></td><td>List persistent workspaces</td></tr>
208
209
  <tr><td><code>mags workspace delete &lt;id&gt;</code></td><td>Delete workspace + S3 data</td></tr>
@@ -243,7 +244,9 @@ mags run 'echo Hello World && uname -a'
243
244
  mags run -w myproject 'pip install flask requests'
244
245
  mags run -w myproject 'python3 app.py'
245
246
 
246
- # Base image &mdash; start from a pre-configured workspace
247
+ # Base image &mdash; create a golden image, sync it, then reuse
248
+ mags run -w golden -p 'apt install -y nodejs && npm install -g typescript'
249
+ mags sync golden # persist to S3
247
250
  mags run --base golden 'npm test' # read-only, changes discarded
248
251
  mags run --base golden -w fork-1 'npm test' # fork: load golden, save to fork-1
249
252