plato-sdk-v2 2.3.0__py3-none-any.whl → 2.3.3__py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
plato/agents/__init__.py CHANGED
@@ -85,6 +85,8 @@ __all__ = [
     "span",
     "log_event",
     "upload_artifacts",
+    "upload_artifact",
+    "upload_checkpoint",
     "reset_logging",
 ]
 
@@ -102,7 +104,9 @@ from plato.agents.logging import (
     log_event,
     reset_logging,
     span,
+    upload_artifact,
     upload_artifacts,
+    upload_checkpoint,
 )
 from plato.agents.runner import run_agent
 from plato.agents.trajectory import (
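
The two new helpers are re-exported at the package root, so callers can import them directly. A minimal import sketch (names taken from the hunk above):

    # Sketch: importing the new public helpers added in 2.3.3.
    from plato.agents import upload_artifact, upload_checkpoint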
plato/agents/logging.py CHANGED
@@ -399,3 +399,117 @@ async def upload_artifacts(dir_path: str) -> str | None:
     except Exception as e:
         logger.warning(f"Failed to upload artifacts: {e}")
         return None
+
+
+# =============================================================================
+# Artifact Upload (Generic)
+# =============================================================================
+
+
+async def upload_artifact(
+    data: bytes,
+    artifact_type: str,
+    filename: str | None = None,
+    extra: dict[str, Any] | None = None,
+) -> dict[str, Any] | None:
+    """Upload an artifact to Chronos.
+
+    Artifacts are stored in S3 and linked to the session in the database.
+
+    Args:
+        data: Raw bytes of the artifact
+        artifact_type: Type of artifact (e.g., "state", "logs", "trajectory")
+        filename: Optional filename for the artifact
+        extra: Optional extra data to store with the artifact
+
+    Returns:
+        Dict with artifact_id and s3_url if successful, None otherwise.
+    """
+    chronos = _ChronosLogger._instance
+    if not chronos or not chronos.enabled:
+        return None
+
+    try:
+        data_base64 = base64.b64encode(data).decode("utf-8")
+        logger.info(f"Uploading artifact: type={artifact_type}, size={len(data)} bytes")
+    except Exception as e:
+        logger.warning(f"Failed to encode artifact: {e}")
+        return None
+
+    try:
+        async with httpx.AsyncClient(timeout=120.0) as client:
+            response = await client.post(
+                f"{chronos.callback_url}/artifact",
+                json={
+                    "session_id": chronos.session_id,
+                    "artifact_type": artifact_type,
+                    "data_base64": data_base64,
+                    "filename": filename,
+                    "extra": extra or {},
+                },
+            )
+            if response.status_code == 200:
+                result = response.json()
+                logger.info(f"Uploaded artifact: {result}")
+                return result
+            else:
+                logger.warning(f"Failed to upload artifact: {response.status_code} {response.text}")
+                return None
+    except Exception as e:
+        logger.warning(f"Failed to upload artifact: {e}")
+        return None
+
+
+# =============================================================================
+# Checkpoint Upload
+# =============================================================================
+
+
+async def upload_checkpoint(
+    step_number: int,
+    env_snapshots: dict[str, str],
+    state_artifact_id: str | None = None,
+    extra: dict[str, Any] | None = None,
+) -> dict[str, Any] | None:
+    """Upload checkpoint data to Chronos.
+
+    A checkpoint includes:
+    - Environment snapshots (artifact IDs per env alias)
+    - State artifact (git bundle of /state directory)
+    - Extra data (step number, timestamp, etc.)
+
+    Args:
+        step_number: The step number when this checkpoint was created
+        env_snapshots: Dict mapping env alias to artifact_id
+        state_artifact_id: Artifact ID of the state bundle (from upload_artifact)
+        extra: Optional additional data
+
+    Returns:
+        Dict with checkpoint_id if successful, None otherwise.
+    """
+    chronos = _ChronosLogger._instance
+    if not chronos or not chronos.enabled:
+        return None
+
+    try:
+        async with httpx.AsyncClient(timeout=60.0) as client:
+            response = await client.post(
+                f"{chronos.callback_url}/checkpoint",
+                json={
+                    "session_id": chronos.session_id,
+                    "step_number": step_number,
+                    "env_snapshots": env_snapshots,
+                    "state_artifact_id": state_artifact_id,
+                    "extra": extra or {},
+                },
+            )
+            if response.status_code == 200:
+                result = response.json()
+                logger.info(f"Uploaded checkpoint: step={step_number}, checkpoint_id={result.get('checkpoint_id')}")
+                return result
+            else:
+                logger.warning(f"Failed to upload checkpoint: {response.status_code} {response.text}")
+                return None
+    except Exception as e:
+        logger.warning(f"Failed to upload checkpoint: {e}")
+        return None
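
Together these two functions support a two-step flow: upload a state bundle with upload_artifact, then reference the returned artifact_id in upload_checkpoint. A minimal usage sketch, assuming an active Chronos-enabled session (the file path, env alias, and snapshot artifact ID below are hypothetical; both calls return None when Chronos logging is disabled):

    import asyncio

    from plato.agents import upload_artifact, upload_checkpoint

    async def save_checkpoint(step: int) -> None:
        # Hypothetical state bundle; per the docstring this is typically a
        # git bundle of the /state directory.
        with open("state.bundle", "rb") as f:
            artifact = await upload_artifact(f.read(), artifact_type="state", filename="state.bundle")
        if artifact is None:
            return  # Chronos disabled or the upload failed
        await upload_checkpoint(
            step_number=step,
            env_snapshots={"db": "env-artifact-123"},  # hypothetical alias -> artifact_id
            state_artifact_id=artifact["artifact_id"],
        )

    asyncio.run(save_checkpoint(1))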
plato/agents/runner.py CHANGED
@@ -96,6 +96,8 @@ async def run_agent(
         f"{logs_dir}:/logs",
         "-v",
         f"{config_file.name}:/config.json:ro",
+        "-v",
+        "/var/run/docker.sock:/var/run/docker.sock",
         "-w",
         "/workspace",
     ]
@@ -116,17 +118,45 @@ async def run_agent(
         stderr=asyncio.subprocess.STDOUT,
     )
 
-    # Stream output line by line
+    # Stream output line by line, collecting for error reporting
+    output_lines: list[str] = []
     assert process.stdout is not None
     while True:
         line = await process.stdout.readline()
         if not line:
             break
-        logger.info(f"[agent] {line.decode().rstrip()}")
+        decoded_line = line.decode().rstrip()
+        output_lines.append(decoded_line)
+        logger.info(f"[agent] {decoded_line}")
 
     await process.wait()
 
     if process.returncode != 0:
+        # Get last N lines of output for error context
+        error_context = "\n".join(output_lines[-50:]) if output_lines else "No output captured"
+
+        # Log error event with container output
+        await log_event(
+            span_type="error",
+            content=f"Agent failed with exit code {process.returncode}",
+            source="agent",
+            extra={
+                "exit_code": process.returncode,
+                "image": image,
+                "agent_name": agent_name,
+                "output": error_context,
+                "output_line_count": len(output_lines),
+            },
+        )
+
+        agent_span.set_extra(
+            {
+                "error": True,
+                "exit_code": process.returncode,
+                "output": error_context,
+            }
+        )
+
         raise RuntimeError(f"Agent failed with exit code {process.returncode}")
 
     agent_span.log("Agent completed successfully")
plato/v1/cli/main.py CHANGED
@@ -11,7 +11,6 @@ from dotenv import load_dotenv
 from plato.v1.cli.agent import agent_app
 from plato.v1.cli.pm import pm_app
 from plato.v1.cli.sandbox import sandbox_app
-from plato.v1.cli.sim import sim_app
 from plato.v1.cli.utils import console
 from plato.v1.cli.world import world_app
 
@@ -70,7 +69,6 @@ app = typer.Typer(help="[bold blue]Plato CLI[/bold blue] - Manage Plato environm
 # Register sub-apps
 app.add_typer(sandbox_app, name="sandbox")
 app.add_typer(pm_app, name="pm")
-app.add_typer(sim_app, name="sim")
 app.add_typer(agent_app, name="agent")
 app.add_typer(world_app, name="world")
 
plato/v1/cli/sandbox.py CHANGED
@@ -516,7 +516,6 @@ def sandbox_start(
         console.print(f" [cyan]Public URL:[/cyan] {display_url}")
         if ssh_host and ssh_config_path:
             console.print(f" [cyan]SSH:[/cyan] ssh -F {ssh_config_path} {ssh_host}")
-            console.print(" [cyan]Docker:[/cyan] export DOCKER_HOST=unix:///var/run/docker-user.sock")
         console.print(f"\n[dim]State saved to {SANDBOX_FILE}[/dim]")
 
     except Exception as e:
@@ -1737,6 +1736,148 @@ def sandbox_state_cmd(
         raise typer.Exit(1)
 
 
+@sandbox_app.command(name="clear-audit")
+def sandbox_clear_audit(
+    config_path: Path | None = typer.Option(None, "--config-path", help="Path to plato-config.yml"),
+    dataset: str = typer.Option("base", "--dataset", "-d", help="Dataset name"),
+    json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
+):
+    """
+    Clear the audit_log table(s) in the sandbox database.
+
+    Truncates all audit_log tables to reset mutation tracking. Use this after
+    initial setup/login to clear any mutations before running a clean login flow.
+
+    REQUIRES:
+
+        .sandbox.yaml in current directory (created by 'plato sandbox start')
+        plato-config.yml with database listener config
+
+    USAGE:
+
+        plato sandbox clear-audit            # Uses plato-config.yml in cwd
+        plato sandbox clear-audit -d base    # Specify dataset
+        plato sandbox clear-audit --json     # JSON output
+
+    WORKFLOW POSITION:
+
+        1. plato sandbox start -c
+        2. plato sandbox start-services
+        3. plato sandbox start-worker --wait
+        4. (agent does initial login/setup, generating mutations)
+        5. plato sandbox clear-audit                    ← you are here
+        6. plato sandbox flow                           ← clean login flow
+        7. plato sandbox state --verify-no-mutations    ← should pass now
+        8. plato sandbox snapshot
+    """
+    state = require_sandbox_state()
+
+    # Get SSH info
+    ssh_host = state.get("ssh_host")
+    ssh_config_path = state.get("ssh_config_path")
+
+    if not ssh_host or not ssh_config_path:
+        console.print("[red]❌ SSH not configured. Missing ssh_host or ssh_config_path in .sandbox.yaml[/red]")
+        raise typer.Exit(1)
+
+    # Find plato-config.yml
+    if not config_path:
+        config_path = Path.cwd() / "plato-config.yml"
+        if not config_path.exists():
+            config_path = Path.cwd() / "plato-config.yaml"
+    if not config_path.exists():
+        console.print("[red]❌ plato-config.yml not found[/red]")
+        raise typer.Exit(1)
+
+    with open(config_path) as f:
+        plato_config = yaml.safe_load(f)
+
+    # Get dataset config
+    datasets = plato_config.get("datasets", {})
+    if dataset not in datasets:
+        console.print(f"[red]❌ Dataset '{dataset}' not found[/red]")
+        raise typer.Exit(1)
+
+    dataset_config = datasets[dataset]
+    listeners = dataset_config.get("listeners", {})
+
+    # Find DB listeners
+    db_listeners = []
+    for name, listener in listeners.items():
+        if isinstance(listener, dict) and listener.get("type") == "db":
+            db_listeners.append((name, listener))
+
+    if not db_listeners:
+        console.print("[red]❌ No database listeners found in plato-config.yml[/red]")
+        console.print("[yellow]Expected: datasets.<dataset>.listeners.<name>.type = 'db'[/yellow]")
+        raise typer.Exit(1)
+
+    results = []
+
+    for name, db_config in db_listeners:
+        db_type = db_config.get("db_type", "postgresql").lower()
+        db_host = db_config.get("db_host", "127.0.0.1")
+        db_port = db_config.get("db_port", 5432 if db_type == "postgresql" else 3306)
+        db_user = db_config.get("db_user", "postgres" if db_type == "postgresql" else "root")
+        db_password = db_config.get("db_password", "")
+        db_database = db_config.get("db_database", "postgres")
+
+        if not json_output:
+            console.print(f"[cyan]Clearing audit_log for listener '{name}' ({db_type})...[/cyan]")
+
+        # Build SQL command based on db_type
+        if db_type == "postgresql":
+            sql_cmd = f"PGPASSWORD='{db_password}' psql -h {db_host} -p {db_port} -U {db_user} -d {db_database} -c 'TRUNCATE TABLE audit_log RESTART IDENTITY CASCADE'"
+        elif db_type in ("mysql", "mariadb"):
+            sql_cmd = f"mysql -h {db_host} -P {db_port} -u {db_user} -p'{db_password}' {db_database} -e 'SET FOREIGN_KEY_CHECKS=0; DELETE FROM audit_log; SET FOREIGN_KEY_CHECKS=1;'"
+        else:
+            if not json_output:
+                console.print(f"[yellow]⚠ Unsupported db_type '{db_type}' for listener '{name}'[/yellow]")
+            results.append({"listener": name, "success": False, "error": f"Unsupported db_type: {db_type}"})
+            continue
+
+        # Run via SSH
+        ret, stdout, stderr = _run_ssh_command(ssh_config_path, ssh_host, sql_cmd)
+
+        if ret == 0:
+            if not json_output:
+                console.print(f"[green]✅ Cleared audit_log for '{name}'[/green]")
+            results.append({"listener": name, "success": True})
+        else:
+            if not json_output:
+                console.print(f"[red]❌ Failed to clear audit_log for '{name}': {stderr}[/red]")
+            results.append({"listener": name, "success": False, "error": stderr})
+
+    # Call state API to refresh in-memory mutation cache
+    session_id = state.get("session_id")
+    api_key = require_api_key()
+    if session_id:
+        if not json_output:
+            console.print("[dim]Refreshing state cache...[/dim]")
+        try:
+            with get_http_client() as client:
+                sessions_state.sync(
+                    client=client,
+                    session_id=session_id,
+                    x_api_key=api_key,
+                )
+        except Exception as e:
+            if not json_output:
+                console.print(f"[yellow]⚠ Failed to refresh state cache: {e}[/yellow]")
+
+    if json_output:
+        console.print(json.dumps({"results": results}))
+    else:
+        # Summary
+        success_count = sum(1 for r in results if r["success"])
+        total = len(results)
+        if success_count == total:
+            console.print(f"\n[green]✅ All {total} audit logs cleared successfully[/green]")
+        else:
+            console.print(f"\n[yellow]⚠ {success_count}/{total} audit logs cleared[/yellow]")
+            raise typer.Exit(1)
+
+
 @sandbox_app.command(name="audit-ui")
 def sandbox_audit_ui():
     """
@@ -1786,31 +1927,24 @@ def sandbox_audit_ui():
 
 
 def _copy_files_respecting_gitignore(src_dir: Path, dst_dir: Path) -> None:
-    """Copy files from src to dst respecting .gitignore rules."""
-    # Copy .gitignore first if it exists
-    gitignore_src = src_dir / ".gitignore"
-    if gitignore_src.exists():
-        gitignore_dst = dst_dir / ".gitignore"
-        if not gitignore_dst.exists():
-            shutil.copy2(gitignore_src, gitignore_dst)
-
-    def should_copy(file_path: Path) -> bool:
-        """Check if file should be copied (not ignored by git)."""
-        base_name = file_path.name
-        # Skip .git directories and .plato-hub.json
-        if base_name.startswith(".git") or base_name == ".plato-hub.json":
-            return False
-        # Use git check-ignore to respect .gitignore rules
-        try:
-            result = subprocess.run(
-                ["git", "check-ignore", "-q", str(file_path)],
-                cwd=src_dir,
-                capture_output=True,
-            )
-            # git check-ignore returns 0 if path IS ignored, 1 if NOT ignored
-            return result.returncode != 0
-        except Exception:
+    """Copy files from src to dst, skipping .git/ and .plato-hub.json.
+
+    Note: This function intentionally does NOT respect .gitignore because
+    start-services needs to copy all workspace files to the VM, including
+    config files that might be gitignored locally (like docker-compose.yml
+    in a 'base/' directory).
+    """
+
+    def should_skip(rel_path: Path) -> bool:
+        """Check if path should be skipped."""
+        parts = rel_path.parts
+        # Skip anything inside .git/ directory
+        if ".git" in parts:
+            return True
+        # Skip .plato-hub.json
+        if rel_path.name == ".plato-hub.json":
             return True
+        return False
 
     # Walk through source directory
     for src_path in src_dir.rglob("*"):
@@ -1820,8 +1954,8 @@ def _copy_files_respecting_gitignore(src_dir: Path, dst_dir: Path) -> None:
         if str(rel_path) == ".":
             continue
 
-        # Check if should copy
-        if not should_copy(src_path):
+        # Check if should skip
+        if should_skip(rel_path):
             continue
 
         dst_path = dst_dir / rel_path
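
The replacement filter is purely path-based, so its behavior is easy to pin down in isolation. A small sketch re-implementing should_skip standalone (the original is nested inside the copy function); note that .gitignore itself is now copied, since ".git" must match a whole path component:

    from pathlib import Path

    def should_skip(rel_path: Path) -> bool:
        # Same rules as the nested helper above.
        return ".git" in rel_path.parts or rel_path.name == ".plato-hub.json"

    assert should_skip(Path(".git/config"))                  # inside .git/
    assert should_skip(Path("sub/.git/HEAD"))                # nested .git/ too
    assert should_skip(Path(".plato-hub.json"))
    assert not should_skip(Path(".gitignore"))               # previously skipped by should_copy
    assert not should_skip(Path("base/docker-compose.yml"))  # the motivating case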
@@ -2007,6 +2141,34 @@ def sandbox_start_services(
 
     try:
         with get_http_client() as client:
+
+            def start_services_on_vm(repo_dir: str) -> list[dict[str, str]]:
+                """Start docker compose services on the VM."""
+                services_started: list[dict[str, str]] = []
+                for svc_name, svc_config in services_config.items():
+                    svc_type = svc_config.get("type", "")
+                    if svc_type == "docker-compose":
+                        compose_file = svc_config.get("file", "docker-compose.yml")
+                        compose_cmd = f"cd {repo_dir} && docker compose -f {compose_file} up -d"
+
+                        if not json_output:
+                            console.print(f"[cyan] Starting docker compose service: {svc_name}...[/cyan]")
+
+                        ret, stdout, stderr = _run_ssh_command(ssh_config_path, ssh_host, compose_cmd)
+                        if ret != 0:
+                            console.print(f"[red]❌ Failed to start service '{svc_name}': {stderr}[/red]")
+                            raise typer.Exit(1)
+
+                        services_started.append({"name": svc_name, "type": "docker-compose", "file": compose_file})
+                        if not json_output:
+                            console.print(f"[green] ✓ Started docker compose service: {svc_name}[/green]")
+                    else:
+                        if not json_output:
+                            console.print(
+                                f"[yellow] ⚠ Skipped service '{svc_name}' (unknown type: {svc_type})[/yellow]"
+                            )
+                return services_started
+
             # Step 1: Get Gitea credentials
             if not json_output:
                 console.print("[cyan]Step 1: Getting Gitea credentials...[/cyan]")
@@ -2064,6 +2226,8 @@ def sandbox_start_services(
             if not json_output:
                 console.print("[cyan]Step 4: Pushing code to hub...[/cyan]")
 
+            repo_dir = f"/home/plato/worktree/{service_name}"
+
             with tempfile.TemporaryDirectory(prefix="plato-hub-") as temp_dir:
                 temp_repo = Path(temp_dir) / "repo"
 
@@ -2148,8 +2312,6 @@ def sandbox_start_services(
             if not json_output:
                 console.print("[cyan]Step 5: Cloning repo on VM...[/cyan]")
 
-            repo_dir = f"/home/plato/worktree/{service_name}"
-
             # Create worktree directory
             _run_ssh_command(ssh_config_path, ssh_host, "mkdir -p /home/plato/worktree")
 
@@ -2170,27 +2332,7 @@ def sandbox_start_services(
             if not json_output:
                 console.print("[cyan]Step 6: Starting services...[/cyan]")
 
-            services_started = []
-            for svc_name, svc_config in services_config.items():
-                svc_type = svc_config.get("type", "")
-                if svc_type == "docker-compose":
-                    compose_file = svc_config.get("file", "docker-compose.yml")
-                    compose_cmd = f"cd {repo_dir} && DOCKER_HOST=unix:///var/run/docker-user.sock docker compose -f {compose_file} up -d"
-
-                    if not json_output:
-                        console.print(f"[cyan] Starting docker compose service: {svc_name}...[/cyan]")
-
-                    ret, stdout, stderr = _run_ssh_command(ssh_config_path, ssh_host, compose_cmd)
-                    if ret != 0:
-                        console.print(f"[red]❌ Failed to start service '{svc_name}': {stderr}[/red]")
-                        raise typer.Exit(1)
-
-                    services_started.append({"name": svc_name, "type": "docker-compose", "file": compose_file})
-                    if not json_output:
-                        console.print(f"[green] ✓ Started docker compose service: {svc_name}[/green]")
-                else:
-                    if not json_output:
-                        console.print(f"[yellow] ⚠ Skipped service '{svc_name}' (unknown type: {svc_type})[/yellow]")
+            services_started = start_services_on_vm(repo_dir)
 
             # Output results
             if json_output:
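
Net effect of these hunks: repo_dir is now computed at Step 4 (before the hub push), the service-start loop is reusable as start_services_on_vm, and the compose command drops its DOCKER_HOST=unix:///var/run/docker-user.sock override in favor of the default socket that runner.py now mounts. A sketch of the command built per service (service name hypothetical; the SSH call is the helper shown in the hunks above):

    # Per docker-compose service, as built by start_services_on_vm:
    repo_dir = "/home/plato/worktree/my-service"  # hypothetical service name
    compose_file = "docker-compose.yml"
    compose_cmd = f"cd {repo_dir} && docker compose -f {compose_file} up -d"
    # Executed on the VM over SSH:
    #   ret, stdout, stderr = _run_ssh_command(ssh_config_path, ssh_host, compose_cmd)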
plato/v1/cli/ssh.py CHANGED
@@ -9,9 +9,21 @@ from cryptography.hazmat.primitives import serialization
 from cryptography.hazmat.primitives.asymmetric import ed25519
 
 
+def get_plato_dir() -> Path:
+    """Get the directory for plato config/SSH files.
+
+    Uses /workspace/.plato if /workspace exists (container environment),
+    otherwise uses ~/.plato (local development).
+    """
+    workspace = Path("/workspace")
+    if workspace.exists() and workspace.is_dir():
+        return workspace / ".plato"
+    return Path.home() / ".plato"
+
+
 def get_next_sandbox_number() -> int:
-    """Find next available sandbox number by checking existing config files in ~/.plato/."""
-    plato_dir = Path.home() / ".plato"
+    """Find next available sandbox number by checking existing config files."""
+    plato_dir = get_plato_dir()
     if not plato_dir.exists():
         return 1
 
@@ -35,7 +47,7 @@ def generate_ssh_key_pair(sandbox_num: int) -> tuple[str, str]:
 
     Returns (public_key_str, private_key_path).
     """
-    plato_dir = Path.home() / ".plato"
+    plato_dir = get_plato_dir()
     plato_dir.mkdir(mode=0o700, exist_ok=True)
 
     private_key_path = plato_dir / f"ssh_{sandbox_num}_key"
@@ -160,7 +172,7 @@ def create_ssh_config(
     TCPKeepAlive yes
     """
 
-    plato_dir = Path.home() / ".plato"
+    plato_dir = get_plato_dir()
     plato_dir.mkdir(mode=0o700, exist_ok=True)
 
     config_path = plato_dir / f"ssh_{sandbox_num}.conf"
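
get_plato_dir makes every SSH-related path container-aware. A quick sketch of the resolution rule and where the generated files land (sandbox number 1 used as an example):

    from plato.v1.cli.ssh import get_plato_dir

    # In the agent container, /workspace exists, so:
    #   get_plato_dir() == Path("/workspace/.plato")
    # On a local machine without /workspace:
    #   get_plato_dir() == Path.home() / ".plato"
    key_path = get_plato_dir() / "ssh_1_key"    # name format from generate_ssh_key_pair
    conf_path = get_plato_dir() / "ssh_1.conf"  # name format from create_ssh_config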