plato-sdk-v2 2.8.6__py3-none-any.whl → 2.8.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,14 +2,22 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
+ import asyncio
6
+ import os
7
+ import subprocess
5
8
  from datetime import datetime
9
+ from pathlib import Path
6
10
  from typing import TYPE_CHECKING
7
11
 
12
+ from pydantic import BaseModel
13
+
8
14
  from plato._generated.api.v2 import jobs
9
15
  from plato._generated.models import (
10
16
  AppApiV2SchemasSessionCreateSnapshotRequest,
11
17
  ConnectRoutingInfoResult,
12
18
  CreateCheckpointRequest,
19
+ CreateDiskSnapshotRequest,
20
+ CreateDiskSnapshotResult,
13
21
  CreateSnapshotResult,
14
22
  ExecuteCommandRequest,
15
23
  ExecuteCommandResult,
@@ -24,6 +32,159 @@ if TYPE_CHECKING:
24
32
  from plato.v2.async_.session import Session
25
33
 
26
34
 
35
class SSHInfo(BaseModel):
    """SSH connection details for reaching an environment through the TLS gateway."""

    # VM job identifier; the gateway routes connections by SNI derived from it.
    job_id: str
    # Gateway hostname that terminates TLS and forwards to the VM.
    gateway_host: str
    # Optional path to the private key used for authentication.
    private_key_path: str | None = None

    @property
    def sni(self) -> str:
        """SNI for TLS routing through gateway."""
        return f"{self.job_id}--22.{self.gateway_host}"

    @property
    def proxy_command(self) -> str:
        """ProxyCommand for SSH config."""
        return f"openssl s_client -quiet -connect {self.gateway_host}:443 -servername {self.sni} 2>/dev/null"

    def ssh_command(self, command: str | None = None) -> list[str]:
        """Build SSH command with all necessary options.

        Args:
            command: Optional command to run on remote. If None, opens interactive shell.

        Returns:
            List of command arguments for subprocess.
        """
        # Options are kept in a mapping and expanded to "-o key=value" pairs.
        options = {
            "StrictHostKeyChecking": "no",
            "UserKnownHostsFile": "/dev/null",
            "LogLevel": "ERROR",
            "ProxyCommand": self.proxy_command,
        }
        argv = ["ssh"]
        for key, value in options.items():
            argv += ["-o", f"{key}={value}"]
        if self.private_key_path:
            argv += ["-i", self.private_key_path]
        argv.append(f"root@{self.job_id}.plato")
        if command:
            argv.append(command)
        return argv

    def ssh_opts_string(self) -> str:
        """Get SSH options as a string for use with rsync -e."""
        parts: list[str] = []
        # Identity file, when present, comes first.
        if self.private_key_path:
            parts.append(f"-i {self.private_key_path}")
        parts += [
            "-o StrictHostKeyChecking=no",
            "-o UserKnownHostsFile=/dev/null",
            "-o LogLevel=ERROR",
            # Quoted so rsync's own word-splitting keeps ProxyCommand intact.
            f"-o 'ProxyCommand={self.proxy_command}'",
        ]
        return "ssh " + " ".join(parts)
90
+
91
+
92
class RsyncResult(BaseModel):
    """Result of an rsync operation."""

    # True when the transfer completed with rsync exit code 0.
    success: bool
    # "synced" on success, otherwise a short (truncated) error description.
    message: str
    # Local source path, stringified exactly as the caller passed it.
    local_path: str
    # Destination path on the remote VM.
    remote_path: str
99
+
100
+
101
class ReverseTunnel:
    """A reverse SSH tunnel that allows the VM to connect back to local machine.

    Uses SSH -R flag through the gateway to set up port forwarding from
    VM back to local. This enables SSHFS mounts from VM to local filesystem.
    """

    def __init__(
        self,
        job_id: str,
        private_key_path: str,
        local_port: int = 22,
        remote_port: int = 2222,
    ):
        # Target VM identifier; the gateway routes by SNI derived from it.
        self.job_id = job_id
        self.private_key_path = private_key_path
        self.local_port = local_port
        self.remote_port = remote_port
        self._process: subprocess.Popen | None = None

    def start(self) -> bool:
        """Start the reverse tunnel in background.

        Idempotent: if a tunnel process is already alive, returns True
        without spawning a second ssh process.

        Returns:
            True if tunnel started successfully.
        """
        # Guard against leaking a second ssh process on repeated start().
        if self._process is not None and self._process.poll() is None:
            return True

        gateway_host = os.getenv("PLATO_GATEWAY_HOST", "gateway.plato.so")
        sni = f"{self.job_id}--22.{gateway_host}"
        proxy_cmd = f"openssl s_client -quiet -connect {gateway_host}:443 -servername {sni} 2>/dev/null"

        # SSH command with reverse port forwarding:
        # -R remote_port:localhost:local_port makes remote_port on the VM
        # forward to local_port on the local machine.
        ssh_args = [
            "ssh",
            "-i",
            self.private_key_path,
            "-o",
            "StrictHostKeyChecking=no",
            "-o",
            "UserKnownHostsFile=/dev/null",
            "-o",
            "LogLevel=ERROR",
            "-o",
            f"ProxyCommand={proxy_cmd}",
            "-R",
            f"{self.remote_port}:localhost:{self.local_port}",
            "-N",  # Don't execute remote command
            f"root@{self.job_id}.plato",
        ]

        try:
            self._process = subprocess.Popen(
                ssh_args,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                stdin=subprocess.DEVNULL,
            )
        except Exception:
            # Best-effort API: e.g. ssh binary missing. Leave no stale handle.
            self._process = None
            return False

        try:
            # Give the tunnel up to one second to establish. wait(timeout=1)
            # returns early if the process dies, instead of always sleeping.
            self._process.wait(timeout=1)
        except subprocess.TimeoutExpired:
            # Still running after the grace period — treat as established.
            return True
        # Process exited within the grace period (bad key, gateway down, ...).
        self._process = None
        return False

    def stop(self) -> None:
        """Stop the reverse tunnel (terminate, then kill after a 5s grace)."""
        if self._process:
            self._process.terminate()
            try:
                self._process.wait(timeout=5)
            except subprocess.TimeoutExpired:
                self._process.kill()
            self._process = None

    def __enter__(self):
        # Context-manager support: start on entry, stop on exit.
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        # Never suppress exceptions raised inside the with-block.
        return False
186
+
187
+
27
188
  class Environment:
28
189
  """An environment represents a single VM within a session.
29
190
 
@@ -40,11 +201,17 @@ class Environment:
40
201
  job_id: str,
41
202
  alias: str,
42
203
  artifact_id: str | None = None,
204
+ simulator: str | None = None,
205
+ status: str | None = None,
206
+ public_url: str | None = None,
43
207
  ):
44
208
  self._session = session
45
209
  self.job_id = job_id
46
210
  self.alias = alias
47
211
  self.artifact_id = artifact_id
212
+ self.simulator = simulator
213
+ self.status = status
214
+ self.public_url = public_url
48
215
 
49
216
  @property
50
217
  def _http(self):
@@ -194,3 +361,408 @@ class Environment:
194
361
 
195
362
  def __repr__(self) -> str:
196
363
  return f"Environment(alias={self.alias!r}, job_id={self.job_id!r})"
364
+
365
+ async def disk_snapshot(
366
+ self,
367
+ override_service: str | None = None,
368
+ override_version: str | None = None,
369
+ override_dataset: str | None = None,
370
+ target: str | None = None,
371
+ ) -> CreateDiskSnapshotResult:
372
+ """Create a disk-only snapshot of this environment.
373
+
374
+ Disk snapshots capture only the disk state (no memory). On resume, the VM
375
+ will do a fresh boot with the preserved disk state. This is faster to
376
+ create and smaller to store than full snapshots.
377
+
378
+ Args:
379
+ override_service: Override simulator/service name in artifact metadata.
380
+ override_version: Override version/tag in artifact metadata.
381
+ override_dataset: Override dataset name in artifact metadata.
382
+ target: Target domain for routing (e.g., sims.plato.so).
383
+
384
+ Returns:
385
+ CreateDiskSnapshotResult with artifact_id.
386
+
387
+ Example:
388
+ # Save as my-app:v1@production
389
+ result = await env.disk_snapshot(
390
+ override_service="my-app",
391
+ override_version="v1",
392
+ override_dataset="production",
393
+ target="sims.plato.so",
394
+ )
395
+ print(f"Artifact: {result.artifact_id}")
396
+ """
397
+ from plato._generated.api.v2.jobs import disk_snapshot
398
+
399
+ return await disk_snapshot.asyncio(
400
+ client=self._http,
401
+ job_id=self.job_id,
402
+ body=CreateDiskSnapshotRequest(
403
+ override_service=override_service,
404
+ override_version=override_version,
405
+ override_dataset=override_dataset,
406
+ target=target,
407
+ ),
408
+ x_api_key=self._api_key,
409
+ )
410
+
411
+ # =========================================================================
412
+ # SSH/Rsync Helpers
413
+ # =========================================================================
414
+
415
+ def get_ssh_info(self, private_key_path: str | None = None) -> SSHInfo:
416
+ """Get SSH connection info for this environment.
417
+
418
+ Args:
419
+ private_key_path: Path to private key file. Required for rsync/ssh.
420
+
421
+ Returns:
422
+ SSHInfo with connection details.
423
+
424
+ Example:
425
+ ssh_info = env.get_ssh_info("/path/to/key")
426
+ subprocess.run(ssh_info.ssh_command("ls -la"))
427
+ """
428
+ gateway_host = os.getenv("PLATO_GATEWAY_HOST", "gateway.plato.so")
429
+ return SSHInfo(
430
+ job_id=self.job_id,
431
+ gateway_host=gateway_host,
432
+ private_key_path=private_key_path,
433
+ )
434
+
435
+ async def rsync(
436
+ self,
437
+ local_path: str | Path,
438
+ remote_path: str,
439
+ private_key_path: str,
440
+ delete: bool = True,
441
+ exclude: list[str] | None = None,
442
+ verbose: bool = False,
443
+ ) -> RsyncResult:
444
+ """Rsync files to this environment.
445
+
446
+ Args:
447
+ local_path: Local file or directory path.
448
+ remote_path: Remote destination path on VM.
449
+ private_key_path: Path to SSH private key.
450
+ delete: Delete files on remote that don't exist locally (default True).
451
+ exclude: Additional patterns to exclude.
452
+ verbose: Show verbose output.
453
+
454
+ Returns:
455
+ RsyncResult with success status and message.
456
+
457
+ Example:
458
+ result = await env.rsync("./src", "/app/src", key_path)
459
+ if not result.success:
460
+ print(f"Rsync failed: {result.message}")
461
+ """
462
+ local_path_obj = Path(local_path)
463
+ if not local_path_obj.exists():
464
+ return RsyncResult(
465
+ success=False,
466
+ message=f"Local path does not exist: {local_path}",
467
+ local_path=str(local_path),
468
+ remote_path=remote_path,
469
+ )
470
+
471
+ is_file = local_path_obj.is_file()
472
+ ssh_info = self.get_ssh_info(private_key_path)
473
+
474
+ # Create remote directory first
475
+ if is_file:
476
+ mkdir_target = str(Path(remote_path).parent)
477
+ else:
478
+ mkdir_target = remote_path
479
+
480
+ await self.execute(f"mkdir -p {mkdir_target}", timeout=30)
481
+
482
+ # Build rsync command
483
+ ssh_cmd = ssh_info.ssh_opts_string()
484
+
485
+ if is_file:
486
+ rsync_args = [
487
+ "rsync",
488
+ "-az",
489
+ "-e",
490
+ ssh_cmd,
491
+ str(local_path),
492
+ f"root@{self.job_id}.plato:{remote_path}",
493
+ ]
494
+ else:
495
+ rsync_args = [
496
+ "rsync",
497
+ "-az",
498
+ "--filter=:- .gitignore",
499
+ "--exclude=.git",
500
+ "--exclude=__pycache__",
501
+ "--exclude=node_modules",
502
+ "--exclude=.venv",
503
+ "--exclude=*.pyc",
504
+ "-e",
505
+ ssh_cmd,
506
+ f"{local_path}/",
507
+ f"root@{self.job_id}.plato:{remote_path}/",
508
+ ]
509
+ if delete:
510
+ rsync_args.insert(2, "--delete")
511
+
512
+ # Add custom excludes
513
+ if exclude:
514
+ for pattern in exclude:
515
+ rsync_args.insert(-2, f"--exclude={pattern}")
516
+
517
+ if verbose:
518
+ rsync_args.insert(2, "-v")
519
+
520
+ # Run rsync in executor to not block
521
+ loop = asyncio.get_event_loop()
522
+
523
+ def _run_rsync():
524
+ try:
525
+ result = subprocess.run(
526
+ rsync_args,
527
+ capture_output=True,
528
+ text=True,
529
+ timeout=600,
530
+ )
531
+ if result.returncode != 0:
532
+ return (False, f"rsync failed: {result.stderr[:200]}")
533
+ return (True, "synced")
534
+ except subprocess.TimeoutExpired:
535
+ return (False, "rsync timed out")
536
+ except Exception as e:
537
+ return (False, str(e))
538
+
539
+ success, message = await loop.run_in_executor(None, _run_rsync)
540
+ return RsyncResult(
541
+ success=success,
542
+ message=message,
543
+ local_path=str(local_path),
544
+ remote_path=remote_path,
545
+ )
546
+
547
+ async def ensure_rsync(self) -> bool:
548
+ """Ensure rsync is installed on the VM.
549
+
550
+ Returns:
551
+ True if rsync is available (was installed or already present).
552
+ """
553
+ result = await self.execute(
554
+ "which rsync || apt-get install -y rsync",
555
+ timeout=60,
556
+ )
557
+ return result.exit_code == 0
558
+
559
+ async def ensure_sshfs(self) -> bool:
560
+ """Ensure sshfs is installed on the VM.
561
+
562
+ Returns:
563
+ True if sshfs is available (was installed or already present).
564
+ """
565
+ result = await self.execute(
566
+ "which sshfs || apt-get install -y sshfs",
567
+ timeout=60,
568
+ )
569
+ return result.exit_code == 0
570
+
571
    async def mount_sshfs(
        self,
        local_path: str,
        remote_mount_path: str,
        ssh_port: int = 2222,
        use_overlay: bool = True,
        local_user: str | None = None,
        identity_file: str = "/root/.ssh/id_ed25519",
    ) -> bool:
        """Mount a local directory on the VM via SSHFS (requires reverse tunnel).

        This expects a reverse SSH tunnel to be set up from the VM back to
        the local machine. Use with `create_reverse_tunnel()`.

        The SSHFS layer is mounted read-only; when ``use_overlay`` is True a
        fuse-overlayfs layer on top makes the mount appear writable, with
        writes landing in a VM-local upper directory (copy-on-write).

        Args:
            local_path: Path on local machine to mount.
            remote_mount_path: Where to mount on the VM.
            ssh_port: Port for reverse tunnel (default 2222).
            use_overlay: Use overlayfs for copy-on-write (default True).
            local_user: Username on local machine for SSH connection.
            identity_file: Path to SSH private key on the VM (default /root/.ssh/id_ed25519).

        Returns:
            True if mount succeeded.
        """
        # Create mount directories
        # Upper/work dirs must be on local filesystem for overlay to work
        if use_overlay:
            # SSHFS mounts at "<path>-lower"; the overlay presents the merged
            # view at remote_mount_path itself.
            lower_dir = f"{remote_mount_path}-lower"
            upper_dir = f"/tmp/overlay{remote_mount_path}-upper"
            work_dir = f"/tmp/overlay{remote_mount_path}-work"
            setup_cmd = f"mkdir -p {lower_dir} {upper_dir} {work_dir} {remote_mount_path}"
        else:
            # No overlay: SSHFS mounts directly at the requested path.
            lower_dir = remote_mount_path
            setup_cmd = f"mkdir -p {remote_mount_path}"

        result = await self.execute(setup_cmd, timeout=30)
        if result.exit_code != 0:
            return False

        # Build SSHFS command
        # Note: uses 127.0.0.1 (not localhost) because of reverse tunnel and VM DNS issues
        user_prefix = f"{local_user}@" if local_user else ""
        # ro: lower layer stays read-only; reconnect + ServerAliveInterval keep
        # the FUSE mount usable across transient tunnel drops.
        sshfs_cmd = (
            f"sshfs -o ro,IdentityFile={identity_file},StrictHostKeyChecking=no,UserKnownHostsFile=/dev/null,reconnect,ServerAliveInterval=15 "
            f"-p {ssh_port} {user_prefix}127.0.0.1:{local_path} {lower_dir}"
        )
        result = await self.execute(sshfs_cmd, timeout=60)
        if result.exit_code != 0:
            return False

        # Set up fuse-overlayfs if requested (works with FUSE lower dirs)
        if use_overlay:
            # Best-effort install; the mount below fails anyway if it's absent.
            await self.execute("which fuse-overlayfs || apt-get install -y fuse-overlayfs", timeout=60)
            overlay_cmd = (
                f"fuse-overlayfs -o lowerdir={lower_dir},upperdir={upper_dir},workdir={work_dir} {remote_mount_path}"
            )
            result = await self.execute(overlay_cmd, timeout=30)
            if result.exit_code != 0:
                return False

        return True
633
+
634
+ async def ensure_nfs(self) -> bool:
635
+ """Ensure NFS client is installed on the VM.
636
+
637
+ Returns:
638
+ True if nfs-common is available.
639
+ """
640
+ result = await self.execute(
641
+ "which mount.nfs || apt-get install -y nfs-common",
642
+ timeout=60,
643
+ )
644
+ return result.exit_code == 0
645
+
646
    async def mount_nfs(
        self,
        remote_mount_path: str,
        nfs_port: int = 2049,
        use_overlay: bool = True,
    ) -> bool:
        """Mount local NFS export on the VM via reverse tunnel.

        Expects a reverse tunnel forwarding nfs_port on VM to local NFS server.

        The NFS layer is mounted read-only; when ``use_overlay`` is True a
        fuse-overlayfs layer on top provides copy-on-write, with writes going
        to a VM-local upper directory.

        Args:
            remote_mount_path: Where to mount on the VM.
            nfs_port: Port for NFS (default 2049).
            use_overlay: Use overlayfs for copy-on-write (default True).

        Returns:
            True if mount succeeded.
        """
        # Create mount directories
        # Upper/work dirs must be on local filesystem for overlay to work
        if use_overlay:
            # NFS mounts at "<path>-lower"; the overlay presents the merged
            # view at remote_mount_path itself.
            lower_dir = f"{remote_mount_path}-lower"
            upper_dir = f"/tmp/overlay{remote_mount_path}-upper"
            work_dir = f"/tmp/overlay{remote_mount_path}-work"
            setup_cmd = f"mkdir -p {lower_dir} {upper_dir} {work_dir} {remote_mount_path}"
        else:
            # No overlay: NFS mounts directly at the requested path.
            lower_dir = remote_mount_path
            setup_cmd = f"mkdir -p {remote_mount_path}"

        result = await self.execute(setup_cmd, timeout=30)
        if result.exit_code != 0:
            return False

        # Mount via NFS through the tunnel
        # 127.0.0.1 because reverse tunnel forwards to local NFS
        mount_cmd = f"mount -t nfs -o ro,port={nfs_port},mountport={nfs_port},tcp,vers=4,nolock 127.0.0.1:/ {lower_dir}"
        result = await self.execute(mount_cmd, timeout=60)
        if result.exit_code != 0:
            return False

        # Set up fuse-overlayfs if requested
        if use_overlay:
            # Best-effort install; the mount below fails anyway if it's absent.
            await self.execute("which fuse-overlayfs || apt-get install -y fuse-overlayfs", timeout=60)
            overlay_cmd = (
                f"fuse-overlayfs -o lowerdir={lower_dir},upperdir={upper_dir},workdir={work_dir} {remote_mount_path}"
            )
            result = await self.execute(overlay_cmd, timeout=30)
            if result.exit_code != 0:
                return False

        return True
697
+
698
+ async def unmount_nfs(self, remote_mount_path: str, use_overlay: bool = True) -> bool:
699
+ """Unmount an NFS mount.
700
+
701
+ Args:
702
+ remote_mount_path: The mount path to unmount.
703
+ use_overlay: Whether overlay was used.
704
+
705
+ Returns:
706
+ True if unmount succeeded.
707
+ """
708
+ if use_overlay:
709
+ await self.execute(f"umount {remote_mount_path}", timeout=30)
710
+ lower_dir = f"{remote_mount_path}-lower"
711
+ result = await self.execute(f"umount {lower_dir}", timeout=30)
712
+ else:
713
+ result = await self.execute(f"umount {remote_mount_path}", timeout=30)
714
+
715
+ return result.exit_code == 0
716
+
717
+ async def unmount_sshfs(self, remote_mount_path: str, use_overlay: bool = True) -> bool:
718
+ """Unmount an SSHFS mount.
719
+
720
+ Args:
721
+ remote_mount_path: The mount path to unmount.
722
+ use_overlay: Whether overlay was used (unmounts both layers).
723
+
724
+ Returns:
725
+ True if unmount succeeded.
726
+ """
727
+ if use_overlay:
728
+ # Unmount overlay first, then sshfs
729
+ await self.execute(f"umount {remote_mount_path}", timeout=30)
730
+ lower_dir = f"{remote_mount_path}-lower"
731
+ result = await self.execute(f"fusermount -u {lower_dir}", timeout=30)
732
+ else:
733
+ result = await self.execute(f"fusermount -u {remote_mount_path}", timeout=30)
734
+
735
+ return result.exit_code == 0
736
+
737
+ def create_reverse_tunnel(
738
+ self,
739
+ private_key_path: str,
740
+ local_port: int = 22,
741
+ remote_port: int = 2222,
742
+ ) -> ReverseTunnel:
743
+ """Create a reverse tunnel for SSHFS mounts.
744
+
745
+ The reverse tunnel allows the VM to connect back to your local machine,
746
+ enabling SSHFS mounts of local directories.
747
+
748
+ Args:
749
+ private_key_path: Path to SSH private key.
750
+ local_port: Local SSH port (default 22).
751
+ remote_port: Port on VM that forwards to local (default 2222).
752
+
753
+ Returns:
754
+ ReverseTunnel instance (call .start() to activate).
755
+
756
+ Example:
757
+ tunnel = env.create_reverse_tunnel(key_path)
758
+ tunnel.start()
759
+ await env.mount_sshfs("/local/code", "/mnt/code", ssh_port=2222)
760
+ # ... do work ...
761
+ tunnel.stop()
762
+ """
763
+ return ReverseTunnel(
764
+ job_id=self.job_id,
765
+ private_key_path=private_key_path,
766
+ local_port=local_port,
767
+ remote_port=remote_port,
768
+ )
@@ -25,6 +25,7 @@ from plato._generated.api.v2.jobs import get_flows as jobs_get_flows
25
25
  from plato._generated.api.v2.jobs import public_url as jobs_public_url
26
26
  from plato._generated.api.v2.jobs import wait_for_ready as jobs_wait_for_ready
27
27
  from plato._generated.api.v2.sessions import add_job as sessions_add_job
28
+ from plato._generated.api.v2.sessions import add_ssh_key as sessions_add_ssh_key
28
29
  from plato._generated.api.v2.sessions import close as sessions_close
29
30
  from plato._generated.api.v2.sessions import connect_network as sessions_connect_network
30
31
  from plato._generated.api.v2.sessions import disk_snapshot as sessions_disk_snapshot
@@ -43,6 +44,8 @@ from plato._generated.api.v2.sessions import state as sessions_state
43
44
  from plato._generated.api.v2.sessions import wait_for_ready as sessions_wait_for_ready
44
45
  from plato._generated.models import (
45
46
  AddJobRequest,
47
+ AddSSHKeyRequest,
48
+ AddSSHKeyResponse,
46
49
  AppApiV2SchemasSessionCreateSnapshotRequest,
47
50
  AppApiV2SchemasSessionCreateSnapshotResponse,
48
51
  AppApiV2SchemasSessionEvaluateResponse,
@@ -465,6 +468,8 @@ class Session:
465
468
  job_id=ctx.job_id,
466
469
  alias=ctx.alias,
467
470
  artifact_id=ctx.artifact_id,
471
+ simulator=ctx.simulator,
472
+ status="running", # Environments are running after from_envs completes
468
473
  )
469
474
  for ctx in env_contexts
470
475
  ]
@@ -674,6 +679,8 @@ class Session:
674
679
  will do a fresh boot with the preserved disk state. This is faster to
675
680
  create and smaller to store than full snapshots.
676
681
 
682
+ Uses snapshot-store backend for chunk-based deduplication and efficient storage.
683
+
677
684
  Args:
678
685
  override_service: Override simulator/service name in artifact metadata.
679
686
  override_version: Override version/git_hash in artifact metadata.
@@ -883,6 +890,8 @@ class Session:
883
890
  job_id=job_id,
884
891
  alias=env.alias,
885
892
  artifact_id=response.env.artifact_id,
893
+ simulator=getattr(env, "simulator", None),
894
+ status="running", # Newly added environments are running
886
895
  )
887
896
 
888
897
  logger.info(f"Added job {job_id} (alias={env.alias}) to session {self.session_id}")
@@ -1134,6 +1143,42 @@ class Session:
1134
1143
  pass
1135
1144
  self._heartbeat_task = None
1136
1145
 
1146
+ # SSH
1147
+
1148
+ async def add_ssh_key(self, public_key: str, username: str = "root") -> AddSSHKeyResponse:
1149
+ """Add an SSH public key to all VMs in this session.
1150
+
1151
+ This allows SSH access to all environments in the session using the
1152
+ corresponding private key.
1153
+
1154
+ Args:
1155
+ public_key: The SSH public key content (e.g., from id_ed25519.pub).
1156
+ username: The user to add the key for (default: root).
1157
+
1158
+ Returns:
1159
+ AddSSHKeyResponse with success status.
1160
+
1161
+ Example:
1162
+ # Generate a keypair
1163
+ subprocess.run(["ssh-keygen", "-t", "ed25519", "-f", "key", "-N", ""])
1164
+ public_key = Path("key.pub").read_text()
1165
+
1166
+ # Add to session
1167
+ await session.add_ssh_key(public_key)
1168
+
1169
+ # Now SSH works for all envs
1170
+ for env in session.envs:
1171
+ ssh_info = env.get_ssh_info("key")
1172
+ subprocess.run(ssh_info.ssh_command("ls -la"))
1173
+ """
1174
+ request = AddSSHKeyRequest(public_key=public_key, username=username)
1175
+ return await sessions_add_ssh_key.asyncio(
1176
+ client=self._http,
1177
+ session_id=self.session_id,
1178
+ body=request,
1179
+ x_api_key=self._api_key,
1180
+ )
1181
+
1137
1182
  # Lifecycle
1138
1183
 
1139
1184
  async def close(self) -> None: