plato-sdk-v2 2.6.2__py3-none-any.whl → 2.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. plato/_generated/__init__.py +1 -1
  2. plato/_generated/api/v2/__init__.py +2 -1
  3. plato/_generated/api/v2/networks/__init__.py +23 -0
  4. plato/_generated/api/v2/networks/add_member.py +75 -0
  5. plato/_generated/api/v2/networks/create_network.py +70 -0
  6. plato/_generated/api/v2/networks/delete_network.py +68 -0
  7. plato/_generated/api/v2/networks/get_network.py +69 -0
  8. plato/_generated/api/v2/networks/list_members.py +69 -0
  9. plato/_generated/api/v2/networks/list_networks.py +74 -0
  10. plato/_generated/api/v2/networks/remove_member.py +73 -0
  11. plato/_generated/api/v2/networks/update_member.py +80 -0
  12. plato/_generated/api/v2/sessions/__init__.py +4 -0
  13. plato/_generated/api/v2/sessions/add_ssh_key.py +81 -0
  14. plato/_generated/api/v2/sessions/connect_network.py +89 -0
  15. plato/_generated/models/__init__.py +145 -24
  16. plato/v1/cli/agent.py +45 -52
  17. plato/v1/cli/chronos.py +46 -58
  18. plato/v1/cli/main.py +14 -25
  19. plato/v1/cli/pm.py +129 -98
  20. plato/v1/cli/proxy.py +343 -0
  21. plato/v1/cli/sandbox.py +421 -425
  22. plato/v1/cli/ssh.py +12 -167
  23. plato/v1/cli/verify.py +79 -55
  24. plato/v1/cli/world.py +13 -12
  25. plato/v2/async_/client.py +24 -2
  26. plato/v2/async_/session.py +48 -0
  27. plato/v2/sync/client.py +24 -2
  28. plato/v2/sync/session.py +48 -0
  29. {plato_sdk_v2-2.6.2.dist-info → plato_sdk_v2-2.7.1.dist-info}/METADATA +1 -1
  30. {plato_sdk_v2-2.6.2.dist-info → plato_sdk_v2-2.7.1.dist-info}/RECORD +32 -20
  31. {plato_sdk_v2-2.6.2.dist-info → plato_sdk_v2-2.7.1.dist-info}/WHEEL +0 -0
  32. {plato_sdk_v2-2.6.2.dist-info → plato_sdk_v2-2.7.1.dist-info}/entry_points.txt +0 -0
plato/v1/cli/sandbox.py CHANGED
@@ -1,5 +1,6 @@
  """Sandbox CLI commands for Plato."""

+ import asyncio
  import base64
  import io
  import json
@@ -13,11 +14,12 @@ import tempfile
  import time
  from datetime import datetime, timezone
  from pathlib import Path
- from urllib.parse import quote
+ from urllib.parse import quote, quote_plus

  import typer
  import yaml
  from rich.logging import RichHandler
+ from sqlalchemy import create_engine, text

  from plato._generated.api.v1.gitea import (
  create_simulator_repository,
@@ -25,12 +27,18 @@ from plato._generated.api.v1.gitea import (
  get_gitea_credentials,
  get_simulator_repository,
  )
- from plato._generated.api.v1.sandbox import setup_root_access, setup_sandbox, start_worker
+ from plato._generated.api.v1.sandbox import start_worker
  from plato._generated.api.v2.jobs import get_flows as jobs_get_flows
  from plato._generated.api.v2.jobs import state as jobs_state
+ from plato._generated.api.v2.sessions import (
+ add_ssh_key as sessions_add_ssh_key,
+ )
  from plato._generated.api.v2.sessions import (
  close as sessions_close,
  )
+ from plato._generated.api.v2.sessions import (
+ connect_network as sessions_connect_network,
+ )
  from plato._generated.api.v2.sessions import (
  execute as sessions_execute,
  )
@@ -47,17 +55,19 @@ from plato._generated.api.v2.sessions import (
  state as sessions_state,
  )
  from plato._generated.models import (
- AppSchemasBuildModelsSetupSandboxRequest,
+ AddSSHKeyRequest,
  AppSchemasBuildModelsSimConfigCompute,
  AppSchemasBuildModelsSimConfigDataset,
  AppSchemasBuildModelsSimConfigMetadata,
+ ConnectNetworkRequest,
  CreateCheckpointRequest,
  ExecuteCommandRequest,
  Flow,
- SetupRootPasswordRequest,
  VMManagementRequest,
  )
- from plato.v1.cli.ssh import setup_ssh_for_sandbox
+ from plato.v1.cli.proxy import ssh as gateway_ssh_command
+ from plato.v1.cli.proxy import tunnel as gateway_tunnel_command
+ from plato.v1.cli.ssh import generate_ssh_key_pair
  from plato.v1.cli.utils import (
  SANDBOX_FILE,
  console,
@@ -74,6 +84,7 @@ from plato.v1.cli.verify import sandbox_verify_app
  from plato.v2.async_.flow_executor import FlowExecutor
  from plato.v2.sync.client import Plato as PlatoV2
  from plato.v2.types import Env, SimConfigCompute
+ from plato.v2.utils.proxy_tunnel import ProxyTunnel, find_free_port

  # UUID pattern for detecting artifact IDs in colon notation
  UUID_PATTERN = re.compile(r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$", re.IGNORECASE)
@@ -81,6 +92,10 @@ UUID_PATTERN = re.compile(r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-
81
92
  sandbox_app = typer.Typer(help="Manage sandboxes for simulator development")
82
93
  sandbox_app.add_typer(sandbox_verify_app, name="verify")
83
94
 
95
+ # Register gateway SSH/tunnel commands
96
+ sandbox_app.command(name="ssh")(gateway_ssh_command)
97
+ sandbox_app.command(name="tunnel")(gateway_tunnel_command)
98
+
84
99
 
85
100
  def format_public_url_with_router_target(public_url: str | None, service_name: str | None) -> str | None:
86
101
  """Format public URL with _plato_router_target parameter for browser access.
@@ -129,42 +144,45 @@ def sandbox_start(
  disk: int = typer.Option(10240, "--disk", help="Disk in MB (blank VM)"),
  # Common options
  timeout: int = typer.Option(1800, "--timeout", help="VM lifetime in seconds (default: 30 minutes)"),
- no_reset: bool = typer.Option(False, "--no-reset", help="Skip initial reset after ready"),
+ connect_network: bool = typer.Option(
+ True, "--network/--no-network", help="Connect VMs to WireGuard network for SSH access (default: enabled)"
+ ),
  json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
  working_dir: Path = typer.Option(
  None, "--working-dir", "-w", help="Working directory for .sandbox.yaml and .plato/"
  ),
  ):
- """
- Start a sandbox environment.
-
- THREE MODES (pick one):
-
- 1. FROM CONFIG (-c): Use plato-config.yml in current directory
-
- plato sandbox start -c
- plato sandbox start -c -d base
-
- 2. FROM SIMULATOR/ARTIFACT (-s or -a): Start from existing artifact
-
- -s <simulator> Latest tag
- -s <simulator>:<tag> Specific tag
- -s <simulator>:<artifact-uuid> Specific artifact (UUID detected)
- -s <simulator> -a <artifact-uuid> Explicit artifact
- -a <artifact-uuid> Artifact only (no simulator name)
-
- 3. BLANK VM (-b): Create fresh VM with custom specs
-
- plato sandbox start -b --service myapp
- plato sandbox start -b --service myapp --cpus 4 --memory 2048
-
- EXAMPLES:
-
- plato sandbox start -c # From config
- plato sandbox start -s espocrm # Latest artifact
- plato sandbox start -s espocrm:staging # Staging tag
- plato sandbox start -s espocrm:e9c25ca5-1234-5678-... # Specific artifact
- plato sandbox start -a e9c25ca5-1234-5678-9abc-... # Artifact only
+ """Start a sandbox environment for simulator development.
+
+ Creates a VM that can be used to develop and test simulators. You must pick exactly
+ one mode to specify how the sandbox should be created.
+
+ Mode Options (pick exactly one):
+ -c, --from-config: Create VM using settings from plato-config.yml in the current
+ directory. Uses the compute specs (cpus, memory, disk) from the config file.
+ -s, --simulator: Start from an existing simulator. Supports formats:
+ '-s name' (latest tag), '-s name:tag' (specific tag), '-s name:uuid' (specific artifact)
+ -a, --artifact-id: Start directly from a specific artifact UUID
+ -b, --blank: Create a blank VM with custom specs (requires --service)
+
+ Config Mode Options:
+ -d, --dataset: Which dataset from the config to use (default: "base")
+
+ Simulator Mode Options:
+ -t, --tag: Artifact tag to use (default: "latest")
+
+ Blank VM Options:
+ --service: Service name for the blank VM (required with -b)
+ --cpus: Number of CPUs (default: 2)
+ --memory: Memory in MB (default: 1024)
+ --disk: Disk size in MB (default: 10240)
+
+ Common Options:
+ --timeout: VM lifetime in seconds before auto-shutdown (default: 1800 = 30 min)
+ --no-reset: Skip the initial environment reset after the VM is ready
+ --no-network: Disable WireGuard network connection (enabled by default for SSH access)
+ -j, --json: Output results as JSON instead of formatted text
+ -w, --working-dir: Directory to store .sandbox.yaml and .plato/ files
  """
  api_key = require_api_key()

@@ -328,12 +346,17 @@ def sandbox_start(
  # Create session using v2 SDK
  if not json_output:
  console.print("[cyan]Creating sandbox...[/cyan]")
+ if connect_network:
+ console.print("[cyan]Network connection will be established after VM is ready...[/cyan]")
+ console.print(
+ "[yellow]Note: First connection on older VMs may take a few minutes to install WireGuard[/yellow]"
+ )

  try:
  plato = PlatoV2(api_key=api_key)
  if not env_config:
  raise ValueError("No environment configuration provided")
- session = plato.sessions.create(envs=[env_config], timeout=timeout)
+ session = plato.sessions.create(envs=[env_config], timeout=timeout, connect_network=connect_network)

  # Get session info
  session_id = session.session_id
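The hunk above reduces to one new keyword argument on the v2 sync client's session creation. A minimal sketch of the same call outside the CLI, assuming an API key in the PLATO_API_KEY environment variable and an already-built env_config (its construction is not shown in this hunk):

    import os

    from plato.v2.sync.client import Plato as PlatoV2

    def start_networked_session(env_config, timeout: int = 1800) -> str:
        # env_config is an Env built from plato-config.yml or an artifact,
        # exactly as sandbox_start does above (construction not shown here).
        plato = PlatoV2(api_key=os.environ["PLATO_API_KEY"])
        session = plato.sessions.create(
            envs=[env_config],
            timeout=timeout,
            connect_network=True,  # new in 2.7.x: join the WireGuard network for SSH
        )
        return session.session_id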
@@ -356,109 +379,68 @@ def sandbox_start(
  if not json_output:
  console.print(f"[yellow]Could not get public URL: {e}[/yellow]")

- # Reset environment unless --no-reset
- if not no_reset:
- if not json_output:
- console.print("[cyan]Resetting environment...[/cyan]")
- session.reset()
+ # Note: We don't reset here - start just launches the sandbox.
+ # Reset is a separate action the user can take later if needed.

  # Setup SSH for ALL modes (so you can SSH into any sandbox)
- ssh_host = None
- ssh_config_path = None
  ssh_private_key_path = None

- if job_id:
+ if session_id and connect_network:
  if not json_output:
  console.print("[cyan]Setting up SSH access...[/cyan]")
  try:
- # Step 1: Generate SSH key pair and create SSH config (like Go hub does)
- # For config mode: use "plato" user (setup_sandbox configures this)
- # For artifact/simulator modes: use "root" user (setup_root_access configures this)
- ssh_username = "plato" if (mode == "config" and full_dataset_config_dict) else "root"
-
+ # Step 1: Generate SSH key pair
  if not json_output:
  console.print("[cyan] Generating SSH key pair...[/cyan]")

- base_url = os.getenv("PLATO_BASE_URL", "https://plato.so")
- ssh_info = setup_ssh_for_sandbox(base_url, job_id, username=ssh_username, working_dir=working_dir)
- ssh_host = ssh_info["ssh_host"]
- ssh_config_path = ssh_info["config_path"]
- ssh_private_key_path = ssh_info["private_key_path"]
- ssh_public_key = ssh_info["public_key"]
+ public_key, private_key_path = generate_ssh_key_pair(session_id[:8], working_dir)
+ ssh_private_key_path = private_key_path

+ # Step 2: Add SSH key to all VMs in the session via API
  if not json_output:
- console.print(f"[cyan] SSH config: {ssh_config_path}[/cyan]")
-
- # Step 2: Upload SSH key to sandbox
- # For --from-config mode: use setup_sandbox with full config
- # For --simulator/--artifact-id modes: use setup_root_access (just SSH key, no config changes)
- if not json_output:
- console.print("[cyan] Uploading SSH key to sandbox...[/cyan]")
-
- if mode == "config" and full_dataset_config_dict:
- # Full config from plato-config.yml - use setup_sandbox API
- compute_dict = full_dataset_config_dict.get("compute", {})
- metadata_dict = full_dataset_config_dict.get("metadata", {})
- services_dict = full_dataset_config_dict.get("services")
- listeners_dict = full_dataset_config_dict.get("listeners")
-
- compute_obj = AppSchemasBuildModelsSimConfigCompute(
- cpus=compute_dict.get("cpus", 2),
- memory=compute_dict.get("memory", 2048),
- disk=compute_dict.get("disk", 10240),
- app_port=compute_dict.get("app_port", 80),
- plato_messaging_port=compute_dict.get("plato_messaging_port", 7000),
- )
+ console.print("[cyan] Adding SSH key to VMs...[/cyan]")

- metadata_obj = AppSchemasBuildModelsSimConfigMetadata(
- name=metadata_dict.get("name", sim_name or "sandbox"),
- description=metadata_dict.get("description", ""),
- source_code_url=metadata_dict.get("source_code_url"),
- start_url=metadata_dict.get("start_url", "blank"),
- license=metadata_dict.get("license"),
- variables=metadata_dict.get("variables"),
- flows_path=metadata_dict.get("flows_path"),
- )
+ ssh_username = "root"
+ add_key_request = AddSSHKeyRequest(
+ public_key=public_key,
+ username=ssh_username,
+ )

- dataset_config_obj = AppSchemasBuildModelsSimConfigDataset(
- compute=compute_obj,
- metadata=metadata_obj,
- services=services_dict,
- listeners=listeners_dict,
+ with get_http_client() as client:
+ add_key_response = sessions_add_ssh_key.sync(
+ client=client,
+ session_id=session_id,
+ body=add_key_request,
+ x_api_key=api_key,
  )

- dataset_value = dataset_name or state_extras.get("dataset", "base")
- setup_request = AppSchemasBuildModelsSetupSandboxRequest(
- service=sim_name or "",
- dataset=str(dataset_value) if dataset_value else "",
- plato_dataset_config=dataset_config_obj,
- ssh_public_key=ssh_public_key,
- )
+ if not json_output:
+ # Debug: show full response
+ console.print("[yellow]DEBUG add_ssh_key response:[/yellow]")
+ console.print(f" success: {add_key_response.success}")
+
+ # Show results for each job
+ for jid, result in add_key_response.results.items():
+ console.print(f" [cyan]Job {jid}:[/cyan]")
+ console.print(f" success: {result.success}")
+ console.print(f" error: {result.error}")
+ console.print(" output:")
+ if result.output:
+ console.print(result.output)
+ else:
+ console.print(" (none)")
+ if result.success:
+ console.print(f" [green]✓[/green] {jid}: SSH key added")
+ else:
+ console.print(f" [red]✗[/red] {jid}: {result.error}")

- with get_http_client() as client:
- _setup_response = setup_sandbox.sync(
- client=client,
- public_id=job_id,
- body=setup_request,
- x_api_key=api_key,
- )
+ if add_key_response.success:
+ if not json_output:
+ console.print("[green]SSH setup complete![/green]")
+ console.print(" [cyan]SSH:[/cyan] plato sandbox ssh")
  else:
- # Artifact/simulator modes - use setup_root_access API (just SSH key, preserves existing config)
- setup_root_request = SetupRootPasswordRequest(
- ssh_public_key=ssh_public_key,
- )
-
- with get_http_client() as client:
- _setup_response = setup_root_access.sync(
- client=client,
- public_id=job_id,
- body=setup_root_request,
- x_api_key=api_key,
- )
-
- if not json_output:
- console.print("[green]SSH setup complete![/green]")
- console.print(f" [cyan]SSH:[/cyan] ssh -F {ssh_config_path} {ssh_host}")
+ if not json_output:
+ console.print("[red]SSH key setup failed - SSH may not work[/red]")

  except Exception as e:
  if not json_output:
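The rewritten block above swaps setup_sandbox/setup_root_access for a locally generated key pair plus the new add_ssh_key session endpoint. A condensed sketch of that sequence, using only names that appear in this diff (generate_ssh_key_pair, AddSSHKeyRequest, sessions_add_ssh_key); the client argument stands in for the module's existing HTTP client helper, and the response fields follow the ones printed above:

    from pathlib import Path

    from plato._generated.api.v2.sessions import add_ssh_key as sessions_add_ssh_key
    from plato._generated.models import AddSSHKeyRequest
    from plato.v1.cli.ssh import generate_ssh_key_pair

    def add_session_ssh_key(client, session_id: str, api_key: str, working_dir: Path | None = None) -> str:
        # Generate a key pair named after the session, then register the public
        # half with every VM in the session (username "root", as above).
        public_key, private_key_path = generate_ssh_key_pair(session_id[:8], working_dir)
        response = sessions_add_ssh_key.sync(
            client=client,
            session_id=session_id,
            body=AddSSHKeyRequest(public_key=public_key, username="root"),
            x_api_key=api_key,
        )
        # response.results maps job IDs to per-VM success/error/output fields.
        for jid, result in response.results.items():
            print(jid, "ok" if result.success else result.error)
        return private_key_path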
@@ -482,16 +464,15 @@ def sandbox_start(
  "created_at": datetime.now(timezone.utc).isoformat(),
  **state_extras,
  }
- # Add SSH info if available from setup_sandbox
- if ssh_host:
- state["ssh_host"] = ssh_host
- if ssh_config_path:
- state["ssh_config_path"] = ssh_config_path
+ # Add SSH private key path if available
  if ssh_private_key_path:
  state["ssh_private_key_path"] = ssh_private_key_path
  # Add heartbeat PID
  if heartbeat_pid:
  state["heartbeat_pid"] = heartbeat_pid
+ # Add network connection status
+ if connect_network:
+ state["network_connected"] = True
  save_sandbox_state(state, working_dir)

  # Close the plato client (heartbeat process keeps session alive)
@@ -504,11 +485,9 @@ def sandbox_start(
  "job_id": job_id,
  "public_url": display_url, # Full URL with _plato_router_target
  }
- if ssh_host:
- output["ssh_host"] = ssh_host
- if ssh_config_path:
- output["ssh_config_path"] = ssh_config_path
- output["ssh_command"] = f"ssh -F {ssh_config_path} {ssh_host}"
+ if ssh_private_key_path:
+ output["ssh_private_key_path"] = ssh_private_key_path
+ output["ssh_command"] = "plato sandbox ssh"
  console.print(json.dumps(output))
  else:
  console.print("\n[green]Sandbox started successfully![/green]")
@@ -517,15 +496,36 @@ def sandbox_start(
  if public_url:
  display_url = format_public_url_with_router_target(public_url, sim_name)
  console.print(f" [cyan]Public URL:[/cyan] {display_url}")
- if ssh_host and ssh_config_path:
- console.print(f" [cyan]SSH:[/cyan] ssh -F {ssh_config_path} {ssh_host}")
+ if ssh_private_key_path:
+ console.print(" [cyan]SSH:[/cyan] plato sandbox ssh")
+ # Warn if using host-only routing (no VM-to-VM mesh)
+ if connect_network and hasattr(session, "network_host_only") and session.network_host_only:
+ console.print("\n[yellow]Warning: WireGuard not available in VM - using host-only routing[/yellow]")
+ console.print("[yellow] SSH from outside works, but VM-to-VM networking is disabled[/yellow]")
  console.print(f"\n[dim]State saved to {SANDBOX_FILE}[/dim]")

  except Exception as e:
  if json_output:
  console.print(json.dumps({"error": str(e)}))
  else:
- console.print(f"[red]Failed to start sandbox: {e}[/red]")
+ error_msg = str(e)
+ # Check if it's a network connection error with VM details
+ if "Network connection failed" in error_msg or "WireGuard" in error_msg:
+ console.print("[red]Failed to start sandbox - network setup failed[/red]")
+ console.print("[yellow]VM error:[/yellow]")
+ # Clean up error message - remove SSH warnings and format nicely
+ clean_lines = []
+ for line in error_msg.split("\n"):
+ line = line.strip()
+ # Skip SSH warnings
+ if line.startswith("Warning:") or "known hosts" in line:
+ continue
+ if line:
+ clean_lines.append(line)
+ for line in clean_lines:
+ console.print(f" {line}")
+ else:
+ console.print(f"[red]Failed to start sandbox: {e}[/red]")
  raise typer.Exit(1) from e


@@ -542,30 +542,23 @@ def sandbox_snapshot(
  messaging_port: int = typer.Option(None, "--messaging-port", help="Override messaging port"),
  target: str = typer.Option(None, "--target", help="Override target domain (e.g., myapp.web.plato.so)"),
  ):
- """
- Create a snapshot of the current sandbox state.
-
- Saves the artifact ID to .sandbox.yaml so it can be used by
- 'plato pm submit base' without needing to specify it manually.
-
- CONFIG BEHAVIOR:
-
- - Sandboxes started from config (-c): Automatically includes plato-config.yml,
- flows.yml, app_port, and messaging_port in the snapshot.
- - Sandboxes started from artifact: Inherits config from parent artifact.
- Use --include-config to override with local config files.
-
- USAGE:
-
- plato sandbox snapshot # Creates snapshot, saves artifact_id
- plato sandbox snapshot --json # JSON output
- plato sandbox snapshot -c # Force include local config files
- plato sandbox snapshot --app-port 8080 # Override app port
-
- NEXT STEPS:
-
- After snapshot, you can submit for review:
- plato pm submit base # Reads artifact_id from .sandbox.yaml
+ """Create a snapshot of the current sandbox state.
+
+ Captures the current VM state as an artifact that can be submitted for review or
+ used as a starting point for future sandboxes. The artifact ID is saved to
+ .sandbox.yaml so it can be used by 'plato pm submit base'.
+
+ For sandboxes started from config (-c), automatically includes plato-config.yml and
+ flows.yml in the snapshot. For sandboxes started from an artifact, config is inherited
+ from the parent.
+
+ Options:
+ -j, --json: Output results as JSON instead of formatted text
+ -c, --include-config: Force including local plato-config.yml and flows.yml in the
+ snapshot. Auto-enabled for sandboxes started from config.
+ --app-port: Override the internal application port stored in the artifact
+ --messaging-port: Override the Plato messaging port stored in the artifact
+ --target: Override the target domain (e.g., myapp.web.plato.so)
  """
  api_key = require_api_key()
  state = require_sandbox_state()
@@ -660,26 +653,11 @@ def sandbox_snapshot(

  @sandbox_app.command(name="stop")
  def sandbox_stop():
- """
- Stop and destroy the current sandbox.
+ """Stop and destroy the current sandbox.

- Closes the session, cleans up SSH keys, and removes .sandbox.yaml.
+ Terminates the remote VM session, stops the heartbeat background process,
+ cleans up local SSH keys created for this sandbox, and removes .sandbox.yaml.
  Run this when you're done with the sandbox or want to start fresh.
-
- REQUIRES:
-
- .sandbox.yaml in current directory (created by 'plato sandbox start')
-
- USAGE:
-
- plato sandbox stop
-
- WHAT IT DOES:
-
- 1. Stops the heartbeat process
- 2. Closes the remote session
- 3. Removes SSH config and keys
- 4. Deletes .sandbox.yaml
  """
  api_key = require_api_key()
  state = require_sandbox_state()
@@ -703,16 +681,9 @@ def sandbox_stop():
  x_api_key=api_key,
  )

- # Clean up SSH config and key files (like Go hub does)
- ssh_config_path = state.get("ssh_config_path")
+ # Clean up SSH key files
  ssh_private_key_path = state.get("ssh_private_key_path")

- if ssh_config_path:
- config_file = Path(ssh_config_path)
- if config_file.exists():
- config_file.unlink()
- console.print(f"[dim]Removed {ssh_config_path}[/dim]")
-
  if ssh_private_key_path:
  private_key_file = Path(ssh_private_key_path)
  public_key_file = Path(ssh_private_key_path + ".pub")
@@ -731,32 +702,78 @@ def sandbox_stop():
  raise typer.Exit(1) from e


- @sandbox_app.command(name="status")
- def sandbox_status(
+ @sandbox_app.command(name="connect-network")
+ def sandbox_connect_network(
+ session_id: str = typer.Option(None, "--session", "-s", help="Session ID (uses .sandbox.yaml if not provided)"),
  json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
  ):
+ """Connect all jobs in a session to a WireGuard network.
+
+ Establishes encrypted peer-to-peer networking between VMs in the session,
+ allowing SSH access from outside and VM-to-VM communication. Pre-generates
+ WireGuard keys, allocates IPs from the session network subnet, and configures
+ full mesh networking.
+
+ Options:
+ -s, --session: Session ID to connect. If not provided, reads from .sandbox.yaml
+ -j, --json: Output results as JSON instead of formatted text
  """
- Show current sandbox status and connection info.
+ api_key = require_api_key()

- Displays the public URL, SSH config, VM status, and other details
- from .sandbox.yaml plus live status from the API.
+ # Get session ID from argument or .sandbox.yaml
+ if session_id is None:
+ state = require_sandbox_state()
+ session_id = require_sandbox_field(state, "session_id")

- REQUIRES:
+ console.print(f"[cyan]Connecting session {session_id} to network...[/cyan]")

- .sandbox.yaml in current directory (created by 'plato sandbox start')
+ try:
+ with get_http_client() as client:
+ result = sessions_connect_network.sync(
+ client=client,
+ session_id=session_id,
+ body=ConnectNetworkRequest(),
+ x_api_key=api_key,
+ )
+
+ if json_output:
+ console.print_json(data=result)
+ else:
+ # Display results
+ statuses = result.get("statuses", {})
+ success_count = sum(1 for s in statuses.values() if s.get("success"))
+ total_count = len(statuses)
+
+ if success_count == total_count:
+ console.print(f"[green]All {total_count} jobs connected to network[/green]")
+ else:
+ console.print(f"[yellow]{success_count}/{total_count} jobs connected[/yellow]")
+
+ for job_id, status in statuses.items():
+ if status.get("success"):
+ wg_ip = status.get("wireguard_ip", "unknown")
+ console.print(f" [green]✓[/green] {job_id}: {wg_ip}")
+ else:
+ error = status.get("error", "unknown error")
+ console.print(f" [red]✗[/red] {job_id}: {error}")
+
+ except Exception as e:
+ console.print(f"[red]Failed to connect network: {e}[/red]")
+ raise typer.Exit(1) from e

- USAGE:

- plato sandbox status # Human-readable output
- plato sandbox status --json # JSON output for scripts
+ @sandbox_app.command(name="status")
+ def sandbox_status(
+ json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
+ ):
+ """Show current sandbox status and connection info.

- OUTPUT INCLUDES:
+ Displays information from .sandbox.yaml combined with live status from the API.
+ Shows session ID, job ID, VM status (running/stopped/etc.), public URL for browser
+ access, SSH connection details, network connection status, and heartbeat status.

- - Public URL (for browser access)
- - SSH config path (for 'ssh -F <config> sandbox-<id>')
- - VM status (running/stopped/etc.)
- - Session ID, Job ID
- - Service name, dataset
+ Options:
+ -j, --json: Output all status info as JSON instead of formatted text
  """
  state = require_sandbox_state()

@@ -876,10 +893,14 @@ def sandbox_status(
  console.print(f" [cyan]Created:[/cyan] {state.get('created_at')}")

  # Display SSH command if available
- ssh_host = state.get("ssh_host")
- ssh_config_path = state.get("ssh_config_path")
- if ssh_host and ssh_config_path:
- console.print(f" [cyan]SSH:[/cyan] ssh -F {ssh_config_path} {ssh_host}")
+ ssh_private_key_path = state.get("ssh_private_key_path")
+ job_id = state.get("job_id")
+ if ssh_private_key_path and job_id:
+ console.print(" [cyan]SSH:[/cyan] plato sandbox ssh")
+
+ # Display network connection status
+ if state.get("network_connected"):
+ console.print(" [cyan]Network:[/cyan] [green]connected[/green] (WireGuard)")

  # Display heartbeat process status
  heartbeat_pid = state.get("heartbeat_pid")
@@ -930,38 +951,24 @@ def sandbox_start_worker(
  wait_timeout: int = typer.Option(240, "--wait-timeout", help="Timeout in seconds for --wait (default: 240)"),
  json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
  ):
- """
- Start the Plato worker in the sandbox.
-
- The worker handles flow execution and state tracking. Only start it
- AFTER verifying login works (via 'plato sandbox flow' or browser testing).
-
- REQUIRES:
-
- .sandbox.yaml in current directory (created by 'plato sandbox start')
- plato-config.yml in current directory (or specify with --config-path)
-
- USAGE:
-
- plato sandbox start-worker # Uses plato-config.yml in cwd
- plato sandbox start-worker --wait # Wait for worker to be ready
- plato sandbox start-worker -d base # Specify dataset
- plato sandbox start-worker --config-path ./plato-config.yml
-
- WORKFLOW POSITION:
-
- 1. plato sandbox start -c
- 2. plato sandbox start-services
- 3. plato sandbox flow ← verify login works first!
- 4. plato sandbox start-worker --wait ← you are here (wait ~2-3 min)
- 5. plato sandbox flow ← run login again to verify with worker
- 6. plato sandbox state --verify-no-mutations ← verify no mutations
- 7. plato sandbox snapshot
-
- WARNING:
-
- Starting the worker with broken login causes infinite error loops.
- Always verify login works before starting the worker.
+ """Start the Plato worker in the sandbox.
+
+ The worker is the Plato component that handles flow execution, database audit
+ tracking, and state management. It should be started AFTER verifying the login
+ flow works manually, since a broken login with an active worker causes error loops.
+
+ Reads the dataset configuration from plato-config.yml to configure the worker
+ with the correct services, listeners, and compute settings.
+
+ Options:
+ -s, --service: Service name to configure the worker for. Defaults to value in
+ .sandbox.yaml if not provided.
+ -d, --dataset: Dataset name from plato-config.yml (default: "base")
+ --config-path: Path to plato-config.yml. Defaults to current directory.
+ -w, --wait: After starting, poll the state API until the worker is ready.
+ Useful in scripts to ensure the worker is fully initialized.
+ --wait-timeout: Timeout in seconds for --wait (default: 240 seconds)
+ -j, --json: Output results as JSON instead of formatted text
  """
  api_key = require_api_key()
  state = require_sandbox_state()
@@ -1122,36 +1129,20 @@ def sandbox_sync(
  timeout: int = typer.Option(120, "--timeout", "-t", help="Command timeout in seconds"),
  json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
  ):
- """
- Sync local files to the sandbox VM.
-
- Uploads local files to the remote sandbox. Useful for updating
- docker-compose.yml, flows.yml, or other config without restarting.
-
- REQUIRES:
-
- .sandbox.yaml in current directory (created by 'plato sandbox start')
-
- USAGE:
+ """Sync local files to the sandbox VM.

- plato sandbox sync # Sync current directory
- plato sandbox sync ./base # Sync specific directory
- plato sandbox sync -r /custom/path # Custom remote path
+ Creates a tar archive of local files and uploads it to the remote VM via the
+ execute API. Excludes common build artifacts (.git, __pycache__, node_modules,
+ .venv, etc.) to reduce transfer size.

- DEFAULT REMOTE PATH:
+ Arguments:
+ path: Local path to sync (default: current directory)

- /home/plato/worktree/<service>/
-
- WHAT IT SYNCS:
-
- - Respects .gitignore patterns
- - Excludes .git, __pycache__, node_modules, etc.
- - Creates tar archive and extracts on remote
-
- NOTE:
-
- For most workflows, use 'plato sandbox start-services' instead,
- which syncs files AND restarts containers.
+ Options:
+ -r, --remote-path: Destination path on the VM. Defaults to
+ /home/plato/worktree/<service> based on the service in .sandbox.yaml
+ -t, --timeout: Command timeout in seconds for the extract operation (default: 120)
+ -j, --json: Output results as JSON instead of formatted text
  """
  api_key = require_api_key()
  state = require_sandbox_state()
@@ -1367,41 +1358,25 @@ def sandbox_flow(
  local: bool = typer.Option(False, "--local", "-l", help="Force using local flows.yml only"),
  api: bool = typer.Option(False, "--api", "-a", help="Force fetching flows from API only"),
  ):
- """
- Execute a test flow against the running sandbox.
-
- Runs a flow (like login) to verify it works before starting the worker.
- Opens a browser and executes the flow steps automatically.
-
- REQUIRES:
-
- .sandbox.yaml in current directory (created by 'plato sandbox start')
- Either:
- - Local plato-config.yml with flows_path pointing to flows.yml
- - Or sandbox started from artifact (flows fetched from API)
-
- USAGE:
-
- plato sandbox flow # Run "login" flow (default)
- plato sandbox flow -f login # Explicit flow name
- plato sandbox flow -f incorrect_login # Test failed login flow
- plato sandbox flow --local # Force local flows.yml
- plato sandbox flow --api # Force API flows (from artifact)
-
- WORKFLOW POSITION:
+ """Execute a test flow against the running sandbox.

- 1. plato sandbox start -c
- 2. plato sandbox start-services
- 3. plato sandbox flow ← you are here (verify login)
- 4. plato sandbox start-worker
- 5. plato sandbox snapshot
+ Runs a named flow (like "login") using Playwright to verify it works correctly.
+ Opens a visible browser window, navigates to the sandbox public URL, and executes
+ the flow steps automatically. Useful for testing login flows before starting
+ the worker.

- FLOW SOURCE (default priority):
+ By default, looks for flows in local flows.yml (path from plato-config.yml
+ metadata.flows_path), then falls back to fetching from the API if the sandbox
+ was started from an artifact.

- 1. Local flows.yml (from plato-config.yml metadata.flows_path)
- 2. API (fetched from artifact if started from simulator)
+ Options:
+ -f, --flow-name: Name of the flow to execute from flows.yml (default: "login")
+ -l, --local: Only use local flows.yml file. Errors if not found instead of
+ falling back to API.
+ -a, --api: Only fetch flows from the API (from the artifact). Ignores any
+ local flows.yml file.

- Use --local or --api to override this behavior.
+ Note: --local and --api are mutually exclusive.
  """
  # Validate mutually exclusive flags
  if local and api:
@@ -1581,30 +1556,19 @@ def sandbox_state_cmd(
  ),
  json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
  ):
- """
- Get the database state/mutations from the simulator.
-
- Shows what database changes have been detected since the last reset.
- Useful during review to verify:
- - No mutations after login (state should be empty)
- - Mutations appear after making changes (audit is working)
-
- REQUIRES:
-
- .sandbox.yaml in current directory (created by 'plato sandbox start')
-
- USAGE:
-
- plato sandbox state # Show current state
- plato sandbox state --verify-no-mutations # Exit 1 if mutations found
- plato sandbox state -v # Short form
-
- USED DURING REVIEW:
-
- 1. Run login flow
- 2. plato sandbox state -v ← should pass (no mutations)
- 3. Make a change in the app
- 4. plato sandbox state ← should show mutations
+ """Get the database state/mutations from the simulator.
+
+ Queries the worker to show what database changes have been detected since the last
+ reset. Displays mutations grouped by table and operation type (INSERT/UPDATE/DELETE).
+ Useful for verifying that login flows don't cause unwanted database mutations and
+ that the audit system is properly tracking changes.
+
+ Options:
+ -v, --verify-no-mutations: Exit with code 1 if any mutations are detected.
+ Useful for CI/automation to verify login doesn't cause database changes.
+ If mutations are found, the exit code indicates failure.
+ -j, --json: Output the full state response as JSON instead of formatted text.
+ Includes has_mutations and has_error fields for scripting.
  """
  sandbox_state = require_sandbox_state()
  api_key = require_api_key()
@@ -1790,42 +1754,25 @@ def sandbox_clear_audit(
  dataset: str = typer.Option("base", "--dataset", "-d", help="Dataset name"),
  json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
  ):
- """
- Clear the audit_log table(s) in the sandbox database.
-
- Truncates all audit_log tables to reset mutation tracking. Use this after
- initial setup/login to clear any mutations before running a clean login flow.
-
- REQUIRES:
+ """Clear the audit_log table(s) in the sandbox database.

- .sandbox.yaml in current directory (created by 'plato sandbox start')
- plato-config.yml with database listener config
+ Truncates all audit_log tables to reset mutation tracking. Use this after initial
+ setup or login has generated expected mutations, so you can verify that subsequent
+ login flows don't create new mutations.

- USAGE:
+ Reads database connection info from plato-config.yml listeners and executes the
+ appropriate SQL (PostgreSQL TRUNCATE or MySQL DELETE) via SSH to the sandbox VM.

- plato sandbox clear-audit # Uses plato-config.yml in cwd
- plato sandbox clear-audit -d base # Specify dataset
- plato sandbox clear-audit --json # JSON output
-
- WORKFLOW POSITION:
-
- 1. plato sandbox start -c
- 2. plato sandbox start-services
- 3. plato sandbox start-worker --wait
- 4. (agent does initial login/setup, generating mutations)
- 5. plato sandbox clear-audit ← you are here
- 6. plato sandbox flow ← clean login flow
- 7. plato sandbox state --verify-no-mutations ← should pass now
- 8. plato sandbox snapshot
+ Options:
+ --config-path: Path to plato-config.yml file (default: looks in current directory)
+ -d, --dataset: Dataset name to read listener configuration from (default: "base")
+ -j, --json: Output results as JSON instead of formatted text
  """
  state = require_sandbox_state()
+ job_id = state.get("job_id")

- # Get SSH info
- ssh_host = state.get("ssh_host")
- ssh_config_path = state.get("ssh_config_path")
-
- if not ssh_host or not ssh_config_path:
- console.print("[red]❌ SSH not configured. Missing ssh_host or ssh_config_path in .sandbox.yaml[/red]")
+ if not job_id:
+ console.print("[red]❌ No job_id found in .sandbox.yaml[/red]")
  raise typer.Exit(1)

  # Find plato-config.yml
@@ -1862,43 +1809,99 @@ def sandbox_clear_audit(

  results = []

- for name, db_config in db_listeners:
+ def _execute_db_cleanup(name: str, db_config: dict, local_port: int) -> dict:
+ """Execute DB cleanup using sync SQLAlchemy (called after tunnel is up)."""
  db_type = db_config.get("db_type", "postgresql").lower()
  db_user = db_config.get("db_user", "postgres" if db_type == "postgresql" else "root")
  db_password = db_config.get("db_password", "")
  db_database = db_config.get("db_database", "postgres")

- if not json_output:
- console.print(f"[cyan]Clearing audit_log for listener '{name}' ({db_type})...[/cyan]")
+ # Build SQLAlchemy URL based on db_type (sync drivers)
+ user = quote_plus(db_user)
+ password = quote_plus(db_password)
+ database = quote_plus(db_database)

- # Build SQL command based on db_type
- # Use docker exec since psql/mysql aren't installed on the VM directly
  if db_type == "postgresql":
- # Find the postgres container and truncate all audit_log tables across all schemas
- # Use $body$ delimiter instead of $$ to avoid shell expansion
- truncate_sql = "DO \\$body\\$ DECLARE r RECORD; BEGIN FOR r IN SELECT schemaname FROM pg_tables WHERE tablename = 'audit_log' LOOP EXECUTE format('TRUNCATE TABLE %I.audit_log RESTART IDENTITY CASCADE', r.schemaname); END LOOP; END \\$body\\$;"
- sql_cmd = f"CONTAINER=$(docker ps --format '{{{{.Names}}}}\\t{{{{.Image}}}}' | grep -i postgres | head -1 | cut -f1) && docker exec $CONTAINER psql -U {db_user} -d {db_database} -c \"{truncate_sql}\""
+ db_url = f"postgresql+psycopg2://{user}:{password}@127.0.0.1:{local_port}/{database}"
  elif db_type in ("mysql", "mariadb"):
- # Find the mysql/mariadb container and exec into it
- # Use mariadb client (mysql is a symlink or may not exist in newer mariadb images)
- sql_cmd = f"CONTAINER=$(docker ps --format '{{{{.Names}}}}\\t{{{{.Image}}}}' | grep -iE 'mysql|mariadb' | head -1 | cut -f1) && docker exec $CONTAINER mariadb -u {db_user} -p'{db_password}' {db_database} -e 'SET FOREIGN_KEY_CHECKS=0; DELETE FROM audit_log; SET FOREIGN_KEY_CHECKS=1;'"
+ db_url = f"mysql+pymysql://{user}:{password}@127.0.0.1:{local_port}/{database}"
  else:
- if not json_output:
- console.print(f"[yellow]⚠ Unsupported db_type '{db_type}' for listener '{name}'[/yellow]")
- results.append({"listener": name, "success": False, "error": f"Unsupported db_type: {db_type}"})
- continue
+ return {"listener": name, "success": False, "error": f"Unsupported db_type: {db_type}"}
+
+ engine = create_engine(db_url, pool_pre_ping=True)
+ tables_truncated = []
+
+ with engine.begin() as conn:
+ if db_type == "postgresql":
+ # Find and truncate audit_log tables in all schemas
+ result = conn.execute(text("SELECT schemaname, tablename FROM pg_tables WHERE tablename = 'audit_log'"))
+ tables = result.fetchall()
+ for schema, table in tables:
+ conn.execute(text(f"TRUNCATE TABLE {schema}.{table} RESTART IDENTITY CASCADE"))
+ tables_truncated.append(f"{schema}.{table}")
+
+ elif db_type in ("mysql", "mariadb"):
+ # Find and delete from audit_log tables
+ result = conn.execute(
+ text(
+ "SELECT table_schema, table_name FROM information_schema.tables "
+ "WHERE table_name = 'audit_log' AND table_schema = DATABASE()"
+ )
+ )
+ tables = result.fetchall()
+ conn.execute(text("SET FOREIGN_KEY_CHECKS = 0"))
+ for schema, table in tables:
+ conn.execute(text(f"DELETE FROM `{table}`"))
+ tables_truncated.append(table)
+ conn.execute(text("SET FOREIGN_KEY_CHECKS = 1"))
+
+ engine.dispose()
+ return {"listener": name, "success": True, "tables_truncated": tables_truncated}
+
+ async def clear_audit_via_tunnel(name: str, db_config: dict) -> dict:
+ """Clear audit_log by connecting via proxy tunnel."""
+ db_type = db_config.get("db_type", "postgresql").lower()
+ db_port = db_config.get("db_port", 5432 if db_type == "postgresql" else 3306)
+
+ if not json_output:
+ console.print(f"[cyan]Clearing audit_log for listener '{name}' ({db_type})...[/cyan]")

- # Run via SSH
- ret, stdout, stderr = _run_ssh_command(ssh_config_path, ssh_host, sql_cmd)
+ # Find a free local port for the tunnel
+ local_port = find_free_port()

- if ret == 0:
- if not json_output:
- console.print(f"[green]✅ Cleared audit_log for '{name}'[/green]")
- results.append({"listener": name, "success": True})
- else:
+ # Create tunnel and connect
+ tunnel = ProxyTunnel(
+ env_id=job_id,
+ db_port=db_port,
+ temp_password="newpass",
+ host_port=local_port,
+ )
+
+ try:
+ await tunnel.start()
+
+ # Run sync DB cleanup in a thread to avoid blocking the event loop
+ result = await asyncio.to_thread(_execute_db_cleanup, name, db_config, local_port)
+
+ if result["success"]:
+ tables_truncated = result.get("tables_truncated", [])
+ if not json_output:
+ console.print(f"[green]✅ Cleared audit_log for '{name}' ({len(tables_truncated)} tables)[/green]")
+ return result
+
+ except Exception as e:
  if not json_output:
- console.print(f"[red]❌ Failed to clear audit_log for '{name}': {stderr}[/red]")
- results.append({"listener": name, "success": False, "error": stderr})
+ console.print(f"[red]❌ Failed to clear audit_log for '{name}': {e}[/red]")
+ return {"listener": name, "success": False, "error": str(e)}
+ finally:
+ await tunnel.stop()
+
+ # Run async cleanup for each listener
+ async def run_all():
+ tasks = [clear_audit_via_tunnel(name, db_config) for name, db_config in db_listeners]
+ return await asyncio.gather(*tasks)
+
+ results = asyncio.run(run_all())

  # Call state API to refresh in-memory mutation cache
  session_id = state.get("session_id")
@@ -1932,26 +1935,15 @@ def sandbox_clear_audit(

  @sandbox_app.command(name="audit-ui")
  def sandbox_audit_ui():
- """
- Launch Streamlit UI for configuring database audit rules.
-
- Opens a visual interface to help configure audit_ignore_tables
- in plato-config.yml. Useful when you see unwanted mutations
- during review (like session tables, timestamps, etc.).
-
- REQUIRES:
+ """Launch Streamlit UI for configuring database audit rules.

- streamlit installed: pip install streamlit psycopg2-binary pymysql
+ Opens a visual web interface to help configure audit_ignore_tables in plato-config.yml.
+ The UI shows database tables and their recent mutations, making it easy to identify
+ which tables or columns should be ignored (like session tables, last_login timestamps,
+ etc. that change on every login).

- USAGE:
-
- plato sandbox audit-ui
-
- WHEN TO USE:
-
- - Review shows mutations after login (sessions, timestamps)
- - Need to figure out which tables/columns to ignore
- - Want visual help building audit_ignore_tables config
+ Requires streamlit and database drivers to be installed:
+ pip install streamlit psycopg2-binary pymysql
  """
  # Check if streamlit is installed
  if not shutil.which("streamlit"):
@@ -2117,40 +2109,20 @@ def _stop_heartbeat_process(pid: int) -> bool:
  def sandbox_start_services(
  json_output: bool = typer.Option(False, "--json", "-j", help="Output as JSON"),
  ):
- """
- Deploy and start docker compose services on the sandbox.
-
- Syncs your local code to the VM and starts the containers defined
- in plato-config.yml. This is the main command for deploying your app.
-
- REQUIRES:
-
- .sandbox.yaml in current directory (created by 'plato sandbox start -c')
- plato-config.yml with services defined
-
- USAGE:
-
- plato sandbox start-services # Deploy and start containers
- plato sandbox start-services --json # JSON output
-
- WHAT IT DOES:
-
- 1. Pushes local code to Plato Hub (Gitea)
- 2. Clones code on VM via SSH
- 3. Runs 'docker compose up -d' on VM
- 4. Waits for containers to be healthy
+ """Deploy and start docker compose services on the sandbox.

- WORKFLOW POSITION:
+ Syncs your local code to the sandbox VM and starts containers. The process:
+ 1. Gets Gitea credentials and pushes local code to a new branch on Plato Hub
+ 2. Clones the code on the VM via SSH
+ 3. Runs 'docker compose up -d' for each docker-compose service defined in
+ the plato-config.yml services section

- 1. plato sandbox start -c ← creates VM
- 2. plato sandbox start-services ← you are here (deploy app)
- 3. plato sandbox flow ← verify login
- 4. plato sandbox start-worker
- 5. plato sandbox snapshot
+ Run this command again after making local changes to re-sync and restart containers.
+ Requires SSH to be configured (network is enabled by default).

- AFTER MAKING CHANGES:
-
- Run this command again to re-sync and restart containers.
+ Options:
+ -j, --json: Output results as JSON instead of formatted text. Includes
+ branch name, repo URL, VM path, and list of services started.
  """
  api_key = require_api_key()
  state = require_sandbox_state()
@@ -2380,9 +2352,33 @@ def sandbox_start_services(
  if not json_output:
  console.print(f"[green]✓ Code cloned to {repo_dir}[/green]")

- # Step 6: Start services
+ # Step 6: Authenticate ECR
+ if not json_output:
+ console.print("[cyan]Step 6: Authenticating Docker with ECR...[/cyan]")
+
+ ecr_registry = "383806609161.dkr.ecr.us-west-1.amazonaws.com"
+ ecr_token_result = subprocess.run(
+ ["aws", "ecr", "get-login-password", "--region", "us-west-1"],
+ capture_output=True,
+ text=True,
+ )
+ if ecr_token_result.returncode != 0:
+ console.print(f"[red]❌ Failed to get ECR token: {ecr_token_result.stderr}[/red]")
+ raise typer.Exit(1)
+
+ ecr_token = ecr_token_result.stdout.strip()
+ docker_login_cmd = f"echo '{ecr_token}' | docker login --username AWS --password-stdin {ecr_registry}"
+ ret, stdout, stderr = _run_ssh_command(ssh_config_path, ssh_host, docker_login_cmd)
+ if ret != 0:
+ console.print(f"[red]❌ Failed to authenticate Docker with ECR: {stderr}[/red]")
+ raise typer.Exit(1)
+
+ if not json_output:
+ console.print("[green]✓ Docker authenticated with ECR[/green]")
+
+ # Step 7: Start services
  if not json_output:
- console.print("[cyan]Step 6: Starting services...[/cyan]")
+ console.print("[cyan]Step 7: Starting services...[/cyan]")

  services_started = start_services_on_vm(repo_dir)