clonebox 0.1.15__py3-none-any.whl → 0.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clonebox/cli.py CHANGED
@@ -58,6 +58,359 @@ def print_banner():
     console.print(f" Version {__version__}\n", style="dim")


+def _resolve_vm_name_and_config_file(name: Optional[str]) -> tuple[str, Optional[Path]]:
+    config_file: Optional[Path] = None
+
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            return config["vm"]["name"], config_file
+        raise FileNotFoundError(f"Config not found: {config_file}")
+
+    if not name:
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            return config["vm"]["name"], config_file
+        raise FileNotFoundError("No VM name specified and no .clonebox.yaml found")
+
+    return name, None
+
+
+def _qga_ping(vm_name: str, conn_uri: str) -> bool:
+    import subprocess
+
+    try:
+        result = subprocess.run(
+            [
+                "virsh",
+                "--connect",
+                conn_uri,
+                "qemu-agent-command",
+                vm_name,
+                json.dumps({"execute": "guest-ping"}),
+            ],
+            capture_output=True,
+            text=True,
+            timeout=5,
+        )
+        return result.returncode == 0
+    except Exception:
+        return False
+
+
+def _qga_exec(vm_name: str, conn_uri: str, command: str, timeout: int = 10) -> Optional[str]:
+    import subprocess
+
+    try:
+        payload = {
+            "execute": "guest-exec",
+            "arguments": {
+                "path": "/bin/sh",
+                "arg": ["-c", command],
+                "capture-output": True,
+            },
+        }
+        exec_result = subprocess.run(
+            [
+                "virsh",
+                "--connect",
+                conn_uri,
+                "qemu-agent-command",
+                vm_name,
+                json.dumps(payload),
+            ],
+            capture_output=True,
+            text=True,
+            timeout=timeout,
+        )
+        if exec_result.returncode != 0:
+            return None
+
+        resp = json.loads(exec_result.stdout)
+        pid = resp.get("return", {}).get("pid")
+        if not pid:
+            return None
+
+        import base64
+        import time
+
+        deadline = time.time() + timeout
+        while time.time() < deadline:
+            status_payload = {"execute": "guest-exec-status", "arguments": {"pid": pid}}
+            status_result = subprocess.run(
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    json.dumps(status_payload),
+                ],
+                capture_output=True,
+                text=True,
+                timeout=5,
+            )
+            if status_result.returncode != 0:
+                return None
+
+            status_resp = json.loads(status_result.stdout)
+            ret = status_resp.get("return", {})
+            if not ret.get("exited", False):
+                time.sleep(0.3)
+                continue
+
+            out_data = ret.get("out-data")
+            if out_data:
+                return base64.b64decode(out_data).decode().strip()
+            return ""
+
+        return None
+    except Exception:
+        return None
+
+
+def run_vm_diagnostics(
+    vm_name: str,
+    conn_uri: str,
+    config_file: Optional[Path],
+    *,
+    verbose: bool = False,
+    json_output: bool = False,
+) -> dict:
+    import subprocess
+
+    result: dict = {
+        "vm": {"name": vm_name, "conn_uri": conn_uri},
+        "state": {},
+        "network": {},
+        "qga": {},
+        "cloud_init": {},
+        "mounts": {},
+        "health": {},
+    }
+
+    console.print(f"[bold cyan]🧪 Diagnostics: {vm_name}[/]\n")
+
+    try:
+        domstate = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domstate", vm_name],
+            capture_output=True,
+            text=True,
+            timeout=5,
+        )
+        result["state"] = {
+            "returncode": domstate.returncode,
+            "stdout": domstate.stdout.strip(),
+            "stderr": domstate.stderr.strip(),
+        }
+        if domstate.returncode == 0 and domstate.stdout.strip():
+            console.print(f"[green]✅ VM State: {domstate.stdout.strip()}[/]")
+        else:
+            console.print("[red]❌ VM State: unable to read[/]")
+            if verbose and domstate.stderr.strip():
+                console.print(f"[dim]{domstate.stderr.strip()}[/]")
+    except subprocess.TimeoutExpired:
+        result["state"] = {"error": "timeout"}
+        console.print("[red]❌ VM State: timeout[/]")
+        if json_output:
+            console.print_json(json.dumps(result))
+        return result
+
+    console.print("\n[bold]🔍 Checking VM network...[/]")
+    try:
+        domifaddr = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domifaddr", vm_name],
+            capture_output=True,
+            text=True,
+            timeout=10,
+        )
+        result["network"] = {
+            "returncode": domifaddr.returncode,
+            "stdout": domifaddr.stdout.strip(),
+            "stderr": domifaddr.stderr.strip(),
+        }
+        if domifaddr.stdout.strip():
+            console.print(f"[dim]{domifaddr.stdout.strip()}[/]")
+        else:
+            console.print("[yellow]⚠️ No interface address detected yet[/]")
+            if verbose and domifaddr.stderr.strip():
+                console.print(f"[dim]{domifaddr.stderr.strip()}[/]")
+    except Exception as e:
+        result["network"] = {"error": str(e)}
+        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")
+
+    guest_agent_ready = _qga_ping(vm_name, conn_uri)
+    result["qga"]["ready"] = guest_agent_ready
+    if verbose:
+        console.print("\n[bold]🤖 QEMU Guest Agent...[/]")
+        console.print(f"{'[green]✅' if guest_agent_ready else '[red]❌'} QGA connected")
+
+    if not guest_agent_ready:
+        try:
+            dumpxml = subprocess.run(
+                ["virsh", "--connect", conn_uri, "dumpxml", vm_name],
+                capture_output=True,
+                text=True,
+                timeout=10,
+            )
+            has_qga_channel = False
+            if dumpxml.returncode == 0:
+                has_qga_channel = "org.qemu.guest_agent.0" in dumpxml.stdout
+            result["qga"]["dumpxml_returncode"] = dumpxml.returncode
+            result["qga"]["has_channel"] = has_qga_channel
+            if dumpxml.stderr.strip():
+                result["qga"]["dumpxml_stderr"] = dumpxml.stderr.strip()
+
+            console.print(
+                f"[dim]Guest agent channel in VM XML: {'present' if has_qga_channel else 'missing'}[/]"
+            )
+        except Exception as e:
+            result["qga"]["dumpxml_error"] = str(e)
+
+        try:
+            ping_attempt = subprocess.run(
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    json.dumps({"execute": "guest-ping"}),
+                ],
+                capture_output=True,
+                text=True,
+                timeout=10,
+            )
+            result["qga"]["ping_returncode"] = ping_attempt.returncode
+            result["qga"]["ping_stdout"] = ping_attempt.stdout.strip()
+            result["qga"]["ping_stderr"] = ping_attempt.stderr.strip()
+            if ping_attempt.stderr.strip():
+                console.print(f"[dim]qemu-agent-command stderr: {ping_attempt.stderr.strip()}[/]")
+        except Exception as e:
+            result["qga"]["ping_error"] = str(e)
+
+        console.print("[dim]If channel is present, the agent inside VM may not be running yet.[/]")
+        console.print("[dim]Inside VM try: sudo systemctl status qemu-guest-agent && sudo systemctl restart qemu-guest-agent[/]")
+
+    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
+    cloud_init_complete = False
+    if not guest_agent_ready:
+        result["cloud_init"] = {"status": "unknown", "reason": "qga_not_ready"}
+        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU guest agent not connected yet)[/]")
+    else:
+        ready_msg = _qga_exec(vm_name, conn_uri, "cat /var/log/clonebox-ready 2>/dev/null || true", timeout=10)
+        result["cloud_init"]["clonebox_ready_file"] = ready_msg
+        if ready_msg and "CloneBox VM ready" in ready_msg:
+            cloud_init_complete = True
+            result["cloud_init"]["status"] = "complete"
+            console.print("[green]✅ Cloud-init: Complete[/]")
+        else:
+            ci_status = _qga_exec(vm_name, conn_uri, "cloud-init status 2>/dev/null || true", timeout=10)
+            result["cloud_init"]["cloud_init_status"] = ci_status
+            result["cloud_init"]["status"] = "running"
+            console.print("[yellow]⏳ Cloud-init: Still running[/]")
+            if verbose and ci_status:
+                console.print(f"[dim]{ci_status}[/]")
+
+    console.print("\n[bold]💾 Checking mount status...[/]")
+    if not cloud_init_complete:
+        console.print("[dim]Mounts may not be ready until cloud-init completes.[/]")
+
+    mounts_detail: list[dict] = []
+    result["mounts"]["details"] = mounts_detail
+    if not guest_agent_ready:
+        console.print("[yellow]⏳ QEMU guest agent not connected yet - cannot verify mounts.[/]")
+        result["mounts"]["status"] = "unknown"
+    else:
+        if not config_file:
+            config_file = Path.cwd() / ".clonebox.yaml"
+
+        if not config_file.exists():
+            console.print("[dim]No .clonebox.yaml found - cannot check mounts[/]")
+            result["mounts"]["status"] = "no_config"
+        else:
+            config = load_clonebox_config(config_file)
+            all_paths = config.get("paths", {}).copy()
+            all_paths.update(config.get("app_data_paths", {}))
+            result["mounts"]["expected"] = list(all_paths.values())
+            mount_output = _qga_exec(vm_name, conn_uri, "mount | grep 9p || true", timeout=10) or ""
+            mounted_paths = [line.split()[2] for line in mount_output.split("\n") if line.strip()]
+            result["mounts"]["mounted_paths"] = mounted_paths
+
+            mount_table = Table(title="Mount Points", border_style="cyan", show_header=True)
+            mount_table.add_column("Guest Path", style="bold")
+            mount_table.add_column("Mounted", justify="center")
+            mount_table.add_column("Accessible", justify="center")
+            mount_table.add_column("Files", justify="right")
+
+            working_mounts = 0
+            total_mounts = 0
+            for _, guest_path in all_paths.items():
+                total_mounts += 1
+                is_mounted = any(guest_path == mp or guest_path in mp for mp in mounted_paths)
+                accessible = False
+                file_count: str = "?"
+
+                if is_mounted:
+                    test_out = _qga_exec(vm_name, conn_uri, f"test -d {guest_path} && echo yes || echo no", timeout=5)
+                    accessible = test_out == "yes"
+                    if accessible:
+                        count_str = _qga_exec(vm_name, conn_uri, f"ls -A {guest_path} 2>/dev/null | wc -l", timeout=5)
+                        if count_str and count_str.strip().isdigit():
+                            file_count = count_str.strip()
+
+                if is_mounted and accessible:
+                    working_mounts += 1
+
+                mount_table.add_row(
+                    guest_path,
+                    "[green]✅[/]" if is_mounted else "[red]❌[/]",
+                    "[green]✅[/]" if accessible else ("[red]❌[/]" if is_mounted else "[dim]N/A[/]"),
+                    file_count,
+                )
+                mounts_detail.append(
+                    {
+                        "guest_path": guest_path,
+                        "mounted": is_mounted,
+                        "accessible": accessible,
+                        "files": file_count,
+                    }
+                )
+
+            result["mounts"]["working"] = working_mounts
+            result["mounts"]["total"] = total_mounts
+            result["mounts"]["status"] = "ok" if working_mounts == total_mounts else "partial"
+
+            console.print(mount_table)
+            console.print(f"[dim]{working_mounts}/{total_mounts} mounts working[/]")
+
+    console.print("\n[bold]🏥 Health Check Status...[/]")
+    if not guest_agent_ready:
+        result["health"]["status"] = "unknown"
+        console.print("[dim]Health status: Not available yet (QEMU guest agent not ready)[/]")
+    else:
+        health_status = _qga_exec(vm_name, conn_uri, "cat /var/log/clonebox-health-status 2>/dev/null || true", timeout=10)
+        result["health"]["raw"] = health_status
+        if health_status and "HEALTH_STATUS=OK" in health_status:
+            result["health"]["status"] = "ok"
+            console.print("[green]✅ Health: All checks passed[/]")
+        elif health_status and "HEALTH_STATUS=FAILED" in health_status:
+            result["health"]["status"] = "failed"
+            console.print("[red]❌ Health: Some checks failed[/]")
+        else:
+            result["health"]["status"] = "not_run"
+            console.print("[yellow]⏳ Health check not yet run[/]")
+        if verbose and health_status:
+            console.print(f"[dim]{health_status}[/]")
+
+    if json_output:
+        console.print_json(json.dumps(result))
+    return result
+
+
 def interactive_mode():
     """Run the interactive VM creation wizard."""
     print_banner()
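
For illustration only (not part of the published diff): the helpers above drive everything through `virsh qemu-agent-command`. `guest-exec` returns a pid, and `_qga_exec` polls `guest-exec-status` until the guest process exits, then base64-decodes `out-data`. A minimal sketch of that round trip, using a hypothetical `echo hi` command:

import base64
import json

# Payload shape built by _qga_exec; "echo hi" is a hypothetical command.
payload = {
    "execute": "guest-exec",
    "arguments": {"path": "/bin/sh", "arg": ["-c", "echo hi"], "capture-output": True},
}
print(json.dumps(payload))  # passed as: virsh qemu-agent-command <vm> '<payload>'

# Typical guest-exec-status reply once the guest process has exited;
# _qga_exec keeps polling while "exited" is false.
reply = {"return": {"exited": True, "exitcode": 0,
                    "out-data": base64.b64encode(b"hi\n").decode()}}
print(base64.b64decode(reply["return"]["out-data"]).decode().strip())  # -> hi
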
@@ -579,239 +932,177 @@ def cmd_list(args):
     console.print(table)


+def cmd_container_up(args):
+    """Start a container sandbox."""
+    try:
+        from clonebox.container import ContainerCloner
+        from clonebox.models import ContainerConfig
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    mounts: dict[str, str] = {}
+    for m in getattr(args, "mount", []) or []:
+        if ":" not in m:
+            raise ValueError(f"Invalid mount: {m} (expected HOST:CONTAINER)")
+        host, container_path = m.split(":", 1)
+        mounts[host] = container_path
+
+    cfg_kwargs: dict = {
+        "engine": getattr(args, "engine", "auto"),
+        "image": getattr(args, "image", "ubuntu:22.04"),
+        "workspace": Path(getattr(args, "path", ".")),
+        "extra_mounts": mounts,
+        "env_from_dotenv": not getattr(args, "no_dotenv", False),
+        "packages": getattr(args, "package", []) or [],
+        "ports": getattr(args, "port", []) or [],
+    }
+    if getattr(args, "name", None):
+        cfg_kwargs["name"] = args.name
+
+    cfg = ContainerConfig(**cfg_kwargs)
+
+    cloner = ContainerCloner(engine=cfg.engine)
+    cloner.up(cfg, detach=getattr(args, "detach", False))
+
+
+def cmd_container_ps(args):
+    """List containers."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    items = cloner.ps(all=getattr(args, "all", False))
+
+    if getattr(args, "json", False):
+        print(json.dumps(items, indent=2))
+        return
+
+    if not items:
+        console.print("[dim]No containers found.[/]")
+        return
+
+    table = Table(title="Containers", border_style="cyan")
+    table.add_column("Name", style="bold")
+    table.add_column("Image")
+    table.add_column("Status")
+    table.add_column("Ports")
+
+    for c in items:
+        table.add_row(
+            str(c.get("name", "")),
+            str(c.get("image", "")),
+            str(c.get("status", "")),
+            str(c.get("ports", "")),
+        )
+
+    console.print(table)
+
+
+def cmd_container_stop(args):
+    """Stop a container."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    cloner.stop(args.name)
+
+
+def cmd_container_rm(args):
+    """Remove a container."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    cloner.rm(args.name, force=getattr(args, "force", False))
+
+
+def cmd_container_down(args):
+    """Stop and remove a container."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    cloner.stop(args.name)
+    cloner.rm(args.name, force=True)
+
+
+def cmd_diagnose(args):
+    """Run detailed VM diagnostics (standalone)."""
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    try:
+        vm_name, config_file = _resolve_vm_name_and_config_file(name)
+    except FileNotFoundError as e:
+        console.print(f"[red]❌ {e}[/]")
+        return
+
+    run_vm_diagnostics(
+        vm_name,
+        conn_uri,
+        config_file,
+        verbose=getattr(args, "verbose", False),
+        json_output=getattr(args, "json", False),
+    )
+
+
 def cmd_status(args):
     """Check VM installation status and health from workstation."""
     import subprocess
-
+
     name = args.name
     user_session = getattr(args, "user", False)
     conn_uri = "qemu:///session" if user_session else "qemu:///system"
-
-    # If name is a path, load config to get VM name
-    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
-        target_path = Path(name).expanduser().resolve()
-        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
-        if config_file.exists():
-            config = load_clonebox_config(config_file)
-            name = config["vm"]["name"]
-        else:
-            console.print(f"[red]❌ Config not found: {config_file}[/]")
-            return
-
-    if not name:
-        # Try current directory
-        config_file = Path.cwd() / ".clonebox.yaml"
-        if config_file.exists():
-            config = load_clonebox_config(config_file)
-            name = config["vm"]["name"]
-        else:
-            console.print("[red]❌ No VM name specified and no .clonebox.yaml found[/]")
-            return
-
-    console.print(f"[bold cyan]📊 Checking VM status: {name}[/]\n")
-
-    # Check VM state
+
     try:
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "domstate", name],
-            capture_output=True, text=True, timeout=5
-        )
-        vm_state = result.stdout.strip()
-
-        if "running" in vm_state.lower():
-            console.print(f"[green]✅ VM State: {vm_state}[/]")
-        elif "shut off" in vm_state.lower():
-            console.print(f"[yellow]⏸️ VM State: {vm_state}[/]")
-            console.print("[dim]Start with: clonebox start .[/]")
-            return
-        else:
-            console.print(f"[dim]VM State: {vm_state}[/]")
-    except subprocess.TimeoutExpired:
-        console.print("[red]❌ Timeout checking VM state[/]")
-        return
-    except Exception as e:
-        console.print(f"[red]❌ Error: {e}[/]")
+        vm_name, config_file = _resolve_vm_name_and_config_file(name)
+    except FileNotFoundError as e:
+        console.print(f"[red]❌ {e}[/]")
         return
-
-    # Get VM IP address
-    console.print("\n[bold]🔍 Checking VM network...[/]")
-    try:
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "domifaddr", name],
-            capture_output=True, text=True, timeout=10
-        )
-        if result.stdout.strip():
-            console.print(f"[dim]{result.stdout.strip()}[/]")
-            # Extract IP
-            for line in result.stdout.split('\n'):
-                if 'ipv4' in line.lower():
-                    parts = line.split()
-                    for p in parts:
-                        if '/' in p and '.' in p:
-                            ip = p.split('/')[0]
-                            console.print(f"[green]IP Address: {ip}[/]")
-                            break
-        else:
-            console.print("[yellow]⚠️ No IP address yet (VM may still be booting)[/]")
-    except Exception as e:
-        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")
-
-    # Check cloud-init status via console
-    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
-    try:
-        # Use virsh console to check - this is tricky, so we check for the ready file
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-ready"],"capture-output":true}}'],
-            capture_output=True, text=True, timeout=10
-        )
-        if "CloneBox VM ready" in result.stdout or result.returncode == 0:
-            console.print("[green]✅ Cloud-init: Complete[/]")
-        else:
-            console.print("[yellow]⏳ Cloud-init: Still running (packages installing)[/]")
-    except Exception:
-        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU agent may not be ready)[/]")
-
-    # Check mount status
-    console.print("\n[bold]💾 Checking mount status...[/]")
-    try:
-        # Load config to get expected mounts
-        config_file = Path.cwd() / ".clonebox.yaml"
-        if config_file.exists():
-            config = load_clonebox_config(config_file)
-            all_paths = config.get("paths", {}).copy()
-            all_paths.update(config.get("app_data_paths", {}))
-
-            if all_paths:
-                # Check which mounts are active
-                result = subprocess.run(
-                    ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                     '{"execute":"guest-exec","arguments":{"path":"/bin/sh","arg":["-c","mount | grep 9p"],"capture-output":true}}'],
-                    capture_output=True, text=True, timeout=10
-                )
-
-                mount_table = Table(title="Mount Points", border_style="cyan", show_header=True)
-                mount_table.add_column("Guest Path", style="bold")
-                mount_table.add_column("Status", justify="center")
-                mount_table.add_column("Files", justify="right")
-
-                mounted_paths = []
-                if result.returncode == 0 and "return" in result.stdout:
-                    # Parse guest-exec response for mount output
-                    import json
-                    try:
-                        resp = json.loads(result.stdout)
-                        if "return" in resp and "pid" in resp["return"]:
-                            # Get the output from guest-exec-status
-                            pid = resp["return"]["pid"]
-                            status_result = subprocess.run(
-                                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                                 f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
-                                capture_output=True, text=True, timeout=5
-                            )
-                            if status_result.returncode == 0:
-                                status_resp = json.loads(status_result.stdout)
-                                if "return" in status_resp and "out-data" in status_resp["return"]:
-                                    import base64
-                                    mount_output = base64.b64decode(status_resp["return"]["out-data"]).decode()
-                                    mounted_paths = [line.split()[2] for line in mount_output.split('\n') if line.strip()]
-                    except:
-                        pass
-
-                # Check each expected mount
-                working_mounts = 0
-                total_mounts = 0
-                for host_path, guest_path in all_paths.items():
-                    total_mounts += 1
-                    is_mounted = any(guest_path in mp for mp in mounted_paths)
-
-                    # Try to get file count
-                    file_count = "?"
-                    if is_mounted:
-                        try:
-                            count_result = subprocess.run(
-                                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                                 f'{{"execute":"guest-exec","arguments":{{"path":"/bin/sh","arg":["-c","ls -A {guest_path} 2>/dev/null | wc -l"],"capture-output":true}}}}'],
-                                capture_output=True, text=True, timeout=5
-                            )
-                            if count_result.returncode == 0:
-                                resp = json.loads(count_result.stdout)
-                                if "return" in resp and "pid" in resp["return"]:
-                                    pid = resp["return"]["pid"]
-                                    import time
-                                    time.sleep(0.5)
-                                    status_result = subprocess.run(
-                                        ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                                         f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
-                                        capture_output=True, text=True, timeout=5
-                                    )
-                                    if status_result.returncode == 0:
-                                        status_resp = json.loads(status_result.stdout)
-                                        if "return" in status_resp and "out-data" in status_resp["return"]:
-                                            file_count = base64.b64decode(status_resp["return"]["out-data"]).decode().strip()
-                        except:
-                            pass
-
-                    if is_mounted:
-                        status = "[green]✅ Mounted[/]"
-                        working_mounts += 1
-                    else:
-                        status = "[red]❌ Not mounted[/]"
-
-                    mount_table.add_row(guest_path, status, str(file_count))
-
-                console.print(mount_table)
-                console.print(f"[dim]{working_mounts}/{total_mounts} mounts active[/]")
-
-                if working_mounts < total_mounts:
-                    console.print("[yellow]⚠️ Some mounts are missing. Try remounting in VM:[/]")
-                    console.print("[dim] sudo mount -a[/]")
-                    console.print("[dim]Or rebuild VM with: clonebox clone . --user --run --replace[/]")
-            else:
-                console.print("[dim]No mount points configured[/]")
-        else:
-            console.print("[dim]No .clonebox.yaml found - cannot check mounts[/]")
-    except Exception as e:
-        console.print(f"[yellow]⚠️ Cannot check mounts: {e}[/]")
-        console.print("[dim]QEMU guest agent may not be ready yet[/]")
-
-    # Check health status if available
-    console.print("\n[bold]🏥 Health Check Status...[/]")
-    try:
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-health-status"],"capture-output":true}}'],
-            capture_output=True, text=True, timeout=10
-        )
-        if "HEALTH_STATUS=OK" in result.stdout:
-            console.print("[green]✅ Health: All checks passed[/]")
-        elif "HEALTH_STATUS=FAILED" in result.stdout:
-            console.print("[red]❌ Health: Some checks failed[/]")
-        else:
-            console.print("[yellow]⏳ Health check not yet run[/]")
-    except Exception:
-        console.print("[dim]Health status: Not available yet[/]")
-
+
+    run_vm_diagnostics(vm_name, conn_uri, config_file, verbose=False, json_output=False)
+
     # Show useful commands
     console.print("\n[bold]📋 Useful commands:[/]")
-    console.print(f" [cyan]virt-viewer --connect {conn_uri} {name}[/] # Open GUI")
-    console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/] # Console access")
+    console.print(f" [cyan]virt-viewer --connect {conn_uri} {vm_name}[/] # Open GUI")
+    console.print(f" [cyan]virsh --connect {conn_uri} console {vm_name}[/] # Console access")
     console.print(" [dim]Inside VM:[/]")
     console.print(" [cyan]cat /var/log/clonebox-health.log[/] # Full health report")
     console.print(" [cyan]sudo cloud-init status[/] # Cloud-init status")
     console.print(" [cyan]clonebox-health[/] # Re-run health check")
+    console.print(" [dim]On host:[/]")
+    console.print(" [cyan]clonebox test . --user --validate[/] # Full validation (mounts/packages/services)")

     # Run full health check if requested
     if getattr(args, "health", False):
         console.print("\n[bold]🔄 Running full health check...[/]")
         try:
             result = subprocess.run(
-                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
                  '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}'],
                 capture_output=True, text=True, timeout=60
             )
             console.print("[green]Health check triggered. View results with:[/]")
-            console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/]")
+            console.print(f" [cyan]virsh --connect {conn_uri} console {vm_name}[/]")
             console.print(" Then run: [cyan]cat /var/log/clonebox-health.log[/]")
         except Exception as e:
             console.print(f"[yellow]⚠️ Could not trigger health check: {e}[/]")
@@ -2050,6 +2341,64 @@ def main():
     )
     list_parser.set_defaults(func=cmd_list)

+    # Container command
+    container_parser = subparsers.add_parser("container", help="Manage container sandboxes")
+    container_parser.add_argument(
+        "--engine",
+        choices=["auto", "podman", "docker"],
+        default="auto",
+        help="Container engine: auto (default), podman, docker",
+    )
+    container_sub = container_parser.add_subparsers(dest="container_command", help="Container commands")
+
+    container_up = container_sub.add_parser("up", help="Start container")
+    container_up.add_argument("path", nargs="?", default=".", help="Workspace path")
+    container_up.add_argument("--name", help="Container name")
+    container_up.add_argument("--image", default="ubuntu:22.04", help="Container image")
+    container_up.add_argument("--detach", action="store_true", help="Run container in background")
+    container_up.add_argument(
+        "--mount",
+        action="append",
+        default=[],
+        help="Extra mount HOST:CONTAINER (repeatable)",
+    )
+    container_up.add_argument(
+        "--port",
+        action="append",
+        default=[],
+        help="Port mapping (e.g. 8080:80) (repeatable)",
+    )
+    container_up.add_argument(
+        "--package",
+        action="append",
+        default=[],
+        help="APT package to install in image (repeatable)",
+    )
+    container_up.add_argument(
+        "--no-dotenv",
+        action="store_true",
+        help="Do not load env vars from workspace .env",
+    )
+    container_up.set_defaults(func=cmd_container_up)
+
+    container_ps = container_sub.add_parser("ps", aliases=["ls"], help="List containers")
+    container_ps.add_argument("-a", "--all", action="store_true", help="Show all containers")
+    container_ps.add_argument("--json", action="store_true", help="Output JSON")
+    container_ps.set_defaults(func=cmd_container_ps)
+
+    container_stop = container_sub.add_parser("stop", help="Stop container")
+    container_stop.add_argument("name", help="Container name")
+    container_stop.set_defaults(func=cmd_container_stop)
+
+    container_rm = container_sub.add_parser("rm", help="Remove container")
+    container_rm.add_argument("name", help="Container name")
+    container_rm.add_argument("-f", "--force", action="store_true", help="Force remove")
+    container_rm.set_defaults(func=cmd_container_rm)
+
+    container_down = container_sub.add_parser("down", help="Stop and remove container")
+    container_down.add_argument("name", help="Container name")
+    container_down.set_defaults(func=cmd_container_down)
+
     # Detect command
     detect_parser = subparsers.add_parser("detect", help="Detect system state")
     detect_parser.add_argument("--json", action="store_true", help="Output as JSON")
@@ -2117,6 +2466,27 @@ def main():
     )
     status_parser.set_defaults(func=cmd_status)

+    # Diagnose command - detailed diagnostics from workstation
+    diagnose_parser = subparsers.add_parser(
+        "diagnose", aliases=["diag"], help="Run detailed VM diagnostics"
+    )
+    diagnose_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    diagnose_parser.add_argument(
+        "-u",
+        "--user",
+        action="store_true",
+        help="Use user session (qemu:///session)",
+    )
+    diagnose_parser.add_argument(
+        "--verbose", "-v", action="store_true", help="Show more low-level details"
+    )
+    diagnose_parser.add_argument(
+        "--json", action="store_true", help="Print diagnostics as JSON"
+    )
+    diagnose_parser.set_defaults(func=cmd_diagnose)
+
     # Export command - package VM for migration
     export_parser = subparsers.add_parser("export", help="Export VM and data for migration")
     export_parser.add_argument(
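
For illustration only (not part of the published diff): `cmd_status` and the new `diagnose` subcommand both funnel into `run_vm_diagnostics`, which also returns its findings as a dict. A minimal sketch of calling it directly, assuming clonebox 0.1.16 is importable and a hypothetical VM named `dev-vm` exists under the user session:

from clonebox.cli import run_vm_diagnostics

report = run_vm_diagnostics(
    "dev-vm",           # hypothetical VM name
    "qemu:///session",  # same URI cmd_diagnose builds for --user
    None,               # no config file: mount checks fall back to ./.clonebox.yaml
    verbose=True,
    json_output=False,
)
print(report["state"], report["qga"]["ready"], report["mounts"].get("status"))
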
clonebox/cloner.py CHANGED
@@ -668,7 +668,7 @@ fi
             tag = f"mount{idx}"
             # Use uid=1000,gid=1000 to give ubuntu user access to mounts
             # mmap allows proper file mapping
-            mount_opts = "trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000"
+            mount_opts = "trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000,users"
             mount_commands.append(f" - mkdir -p {guest_path}")
             mount_commands.append(f" - chown 1000:1000 {guest_path}")
             mount_commands.append(
@@ -679,34 +679,30 @@ fi

         # User-data
         # Add desktop environment if GUI is enabled
-        base_packages = []
+        runcmd_lines = []
+        base_packages = ["qemu-guest-agent"]
         if config.gui:
-            base_packages.extend([
-                "ubuntu-desktop-minimal",
-                "firefox",
-            ])
+            base_packages.extend(
+                [
+                    "ubuntu-desktop-minimal",
+                    "firefox",
+                ]
+            )

         all_packages = base_packages + list(config.packages)
-        packages_yaml = (
-            "\n".join(f" - {pkg}" for pkg in all_packages) if all_packages else ""
-        )
-
-        # Build runcmd - services, mounts, snaps, post_commands
-        runcmd_lines = []
-
-        # Add service enablement
-        for svc in config.services:
-            runcmd_lines.append(f" - systemctl enable --now {svc} || true")
-
+        packages_yaml = "\n".join([f" - {pkg}" for pkg in all_packages])
+
         # Add fstab entries for persistent mounts after reboot
         if fstab_entries:
-            runcmd_lines.append(" - echo '# CloneBox 9p mounts' >> /etc/fstab")
+            runcmd_lines.append(" - grep -q '^# CloneBox 9p mounts' /etc/fstab || echo '# CloneBox 9p mounts' >> /etc/fstab")
             for entry in fstab_entries:
-                runcmd_lines.append(f" - echo '{entry}' >> /etc/fstab")
+                runcmd_lines.append(f" - grep -qF \"{entry}\" /etc/fstab || echo '{entry}' >> /etc/fstab")
+            runcmd_lines.append(" - mount -a || true")

-        # Add mounts (immediate, before reboot)
-        for cmd in mount_commands:
-            runcmd_lines.append(cmd)
+        # Install APT packages
+        runcmd_lines.append(" - echo 'Installing APT packages...'")
+        for pkg in config.packages:
+            runcmd_lines.append(f" - apt-get install -y {pkg} || true")

         # Install snap packages
         if config.snap_packages:
@@ -740,6 +736,8 @@ fi
             runcmd_lines.append(" - sleep 10 && reboot")

         runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
+        bootcmd_yaml = "\n".join(mount_commands) if mount_commands else ""
+        bootcmd_block = f"\nbootcmd:\n{bootcmd_yaml}\n" if bootcmd_yaml else ""

         # Remove power_state - using shutdown -r instead
         power_state_yaml = ""
@@ -765,6 +763,7 @@ chpasswd:
 # Update package cache and upgrade
 package_update: true
 package_upgrade: false
+{bootcmd_block}

 # Install packages (cloud-init waits for completion before runcmd)
 packages:
@@ -811,8 +810,6 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
         try:
             vm = self.conn.lookupByName(vm_name)
         except libvirt.libvirtError:
-            if ignore_not_found:
-                return False
             log(f"[red]❌ VM '{vm_name}' not found[/]")
             return False

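
For illustration only (not part of the published diff): the fstab runcmd lines are now guarded with grep so repeated cloud-init runs cannot append duplicates, and the immediate 9p mount commands moved from runcmd into a bootcmd block. A sketch of what one guarded line expands to, for a hypothetical fstab entry:

# Hypothetical 9p fstab entry (tag, mount point, options as built in cloner.py).
entry = "mount0 /home/ubuntu/project 9p trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000,users 0 0"
print(f" - grep -qF \"{entry}\" /etc/fstab || echo '{entry}' >> /etc/fstab")
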
clonebox/container.py ADDED
@@ -0,0 +1,190 @@
+#!/usr/bin/env python3
+
+import json
+import shutil
+import subprocess
+import tempfile
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from clonebox.models import ContainerConfig
+
+
+class ContainerCloner:
+    def __init__(self, engine: str = "auto"):
+        self.engine = self._resolve_engine(engine)
+
+    def _resolve_engine(self, engine: str) -> str:
+        if engine == "auto":
+            return self.detect_engine()
+        if engine not in {"podman", "docker"}:
+            raise ValueError("engine must be one of: auto, podman, docker")
+        if shutil.which(engine) is None:
+            raise RuntimeError(f"Container engine not found: {engine}")
+        self._run([engine, "--version"], check=True)
+        return engine
+
+    def detect_engine(self) -> str:
+        if shutil.which("podman") is not None:
+            try:
+                self._run(["podman", "--version"], check=True)
+                return "podman"
+            except Exception:
+                pass
+
+        if shutil.which("docker") is not None:
+            try:
+                self._run(["docker", "--version"], check=True)
+                return "docker"
+            except Exception:
+                pass
+
+        raise RuntimeError("No container engine found (podman/docker)")
+
+    def _run(
+        self,
+        cmd: List[str],
+        check: bool = True,
+        capture_output: bool = True,
+        text: bool = True,
+    ) -> subprocess.CompletedProcess:
+        return subprocess.run(cmd, check=check, capture_output=capture_output, text=text)
+
+    def build_dockerfile(self, config: ContainerConfig) -> str:
+        lines: List[str] = [f"FROM {config.image}"]
+
+        if config.packages:
+            pkgs = " ".join(config.packages)
+            lines.append(
+                "RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y "
+                + pkgs
+                + " && rm -rf /var/lib/apt/lists/*"
+            )
+
+        lines.append("WORKDIR /workspace")
+        lines.append('CMD ["bash"]')
+        return "\n".join(lines) + "\n"
+
+    def build_image(self, config: ContainerConfig, tag: Optional[str] = None) -> str:
+        if tag is None:
+            tag = f"{config.name}:latest"
+
+        dockerfile = self.build_dockerfile(config)
+        workspace = Path(config.workspace).resolve()
+
+        with tempfile.NamedTemporaryFile(prefix="clonebox-dockerfile-", delete=False) as f:
+            dockerfile_path = Path(f.name)
+            f.write(dockerfile.encode())
+
+        try:
+            self._run(
+                [
+                    self.engine,
+                    "build",
+                    "-f",
+                    str(dockerfile_path),
+                    "-t",
+                    tag,
+                    str(workspace),
+                ],
+                check=True,
+            )
+        finally:
+            try:
+                dockerfile_path.unlink()
+            except Exception:
+                pass
+
+        return tag
+
+    def up(self, config: ContainerConfig, detach: bool = False, remove: bool = True) -> None:
+        engine = self._resolve_engine(config.engine if config.engine != "auto" else self.engine)
+
+        image = config.image
+        if config.packages:
+            image = self.build_image(config)
+
+        cmd: List[str] = [engine, "run"]
+        cmd.append("-d" if detach else "-it")
+
+        if remove:
+            cmd.append("--rm")
+
+        cmd.extend(["--name", config.name])
+        cmd.extend(["-w", "/workspace"])
+
+        env_file = Path(config.workspace) / ".env"
+        if config.env_from_dotenv and env_file.exists():
+            cmd.extend(["--env-file", str(env_file)])
+
+        for src, dst in config.mounts.items():
+            cmd.extend(["-v", f"{src}:{dst}"])
+
+        for p in config.ports:
+            cmd.extend(["-p", p])
+
+        cmd.append(image)
+
+        if detach:
+            cmd.extend(["sleep", "infinity"])
+        else:
+            cmd.append("bash")
+
+        subprocess.run(cmd, check=True)
+
+    def stop(self, name: str) -> None:
+        subprocess.run([self.engine, "stop", name], check=True)
+
+    def rm(self, name: str, force: bool = False) -> None:
+        cmd = [self.engine, "rm"]
+        if force:
+            cmd.append("-f")
+        cmd.append(name)
+        subprocess.run(cmd, check=True)
+
+    def ps(self, all: bool = False) -> List[Dict[str, Any]]:
+        if self.engine == "podman":
+            cmd = ["podman", "ps", "--format", "json"]
+            if all:
+                cmd.append("-a")
+            result = self._run(cmd, check=True)
+            try:
+                parsed = json.loads(result.stdout or "[]")
+            except json.JSONDecodeError:
+                return []
+
+            items: List[Dict[str, Any]] = []
+            for c in parsed:
+                name = ""
+                names = c.get("Names")
+                if isinstance(names, list) and names:
+                    name = str(names[0])
+                elif isinstance(names, str):
+                    name = names
+
+                items.append(
+                    {
+                        "name": name,
+                        "image": c.get("Image") or c.get("ImageName") or "",
+                        "status": c.get("State") or c.get("Status") or "",
+                        "ports": c.get("Ports") or [],
+                    }
+                )
+            return items
+
+        cmd = ["docker", "ps", "--format", "{{.Names}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}"]
+        if all:
+            cmd.insert(2, "-a")
+
+        result = self._run(cmd, check=True)
+        items: List[Dict[str, Any]] = []
+        for line in (result.stdout or "").splitlines():
+            if not line.strip():
+                continue
+            parts = line.split("\t")
+            name = parts[0] if len(parts) > 0 else ""
+            image = parts[1] if len(parts) > 1 else ""
+            status = parts[2] if len(parts) > 2 else ""
+            ports = parts[3] if len(parts) > 3 else ""
+            items.append({"name": name, "image": image, "status": status, "ports": ports})
+        return items
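
For illustration only (not part of the published diff): a minimal usage sketch of the new module, assuming podman or docker is installed; the name, package, and port values are placeholders:

from pathlib import Path

from clonebox.container import ContainerCloner
from clonebox.models import ContainerConfig

cfg = ContainerConfig(
    name="demo-sandbox",
    image="ubuntu:22.04",
    workspace=Path("."),
    packages=["git"],     # non-empty packages trigger build_image()
    ports=["8080:80"],
)
cloner = ContainerCloner(engine="auto")  # prefers podman, falls back to docker
cloner.up(cfg, detach=True)              # <engine> run -d --rm ... sleep infinity
print(cloner.ps())
cloner.rm("demo-sandbox", force=True)
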
clonebox/models.py CHANGED
@@ -4,7 +4,8 @@ Pydantic models for CloneBox configuration validation.
 """

 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Literal, Optional
+from uuid import uuid4

 from pydantic import BaseModel, Field, field_validator, model_validator

@@ -124,5 +125,73 @@ class CloneBoxConfig(BaseModel):
     )


+class ContainerConfig(BaseModel):
+    name: str = Field(default_factory=lambda: f"clonebox-{uuid4().hex[:8]}")
+    engine: Literal["auto", "podman", "docker"] = "auto"
+    image: str = "ubuntu:22.04"
+    workspace: Path = Path(".")
+    extra_mounts: Dict[str, str] = Field(default_factory=dict)
+    env_from_dotenv: bool = True
+    packages: List[str] = Field(default_factory=list)
+    ports: List[str] = Field(default_factory=list)
+
+    @field_validator("name")
+    @classmethod
+    def name_must_be_valid(cls, v: str) -> str:
+        if not v or not v.strip():
+            raise ValueError("Container name cannot be empty")
+        if len(v) > 64:
+            raise ValueError("Container name must be <= 64 characters")
+        return v.strip()
+
+    @field_validator("extra_mounts")
+    @classmethod
+    def extra_mounts_must_be_absolute(cls, v: Dict[str, str]) -> Dict[str, str]:
+        for host_path, container_path in v.items():
+            if not str(host_path).startswith("/"):
+                raise ValueError(f"Host path must be absolute: {host_path}")
+            if not str(container_path).startswith("/"):
+                raise ValueError(f"Container path must be absolute: {container_path}")
+        return v
+
+    @field_validator("ports")
+    @classmethod
+    def ports_must_be_valid(cls, v: List[str]) -> List[str]:
+        for p in v:
+            if not isinstance(p, str) or not p.strip():
+                raise ValueError("Port mapping cannot be empty")
+            if ":" in p:
+                host, container = p.split(":", 1)
+                if not host.isdigit() or not container.isdigit():
+                    raise ValueError(f"Invalid port mapping: {p}")
+            else:
+                if not p.isdigit():
+                    raise ValueError(f"Invalid port value: {p}")
+        return v
+
+    @property
+    def mounts(self) -> Dict[str, str]:
+        mounts: Dict[str, str] = {
+            str(self.workspace.resolve()): "/workspace",
+        }
+        mounts.update(self.extra_mounts)
+        return mounts
+
+    def to_docker_run_cmd(self) -> List[str]:
+        if self.engine == "auto":
+            raise ValueError("engine must be resolved before generating run command")
+
+        cmd: List[str] = [self.engine, "run", "-it", "--rm", "--name", self.name]
+
+        for src, dst in self.mounts.items():
+            cmd.extend(["-v", f"{src}:{dst}"])
+
+        for p in self.ports:
+            cmd.extend(["-p", p])
+
+        cmd.append(self.image)
+        return cmd
+
+
 # Backwards compatibility alias
 VMConfigModel = CloneBoxConfig
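
For illustration only (not part of the published diff): a sketch of the new model in isolation, assuming pydantic is available; the paths and port values are placeholders:

from pathlib import Path

from clonebox.models import ContainerConfig

cfg = ContainerConfig(
    engine="docker",
    workspace=Path("/home/user/project"),
    extra_mounts={"/data": "/data"},  # both sides must be absolute
    ports=["8080:80"],
)
print(cfg.mounts)               # workspace -> /workspace plus extra_mounts
print(cfg.to_docker_run_cmd())  # would raise if engine were still "auto"
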
clonebox-0.1.15.dist-info/METADATA → clonebox-0.1.16.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clonebox
-Version: 0.1.15
+Version: 0.1.16
 Summary: Clone your workstation environment to an isolated VM with selective apps, paths and services
 Author: CloneBox Team
 License: Apache-2.0
clonebox-0.1.16.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
+clonebox/__init__.py,sha256=C1J7Uwrp8H9Zopo5JgrQYzXg-PWls1JdqmE_0Qp1Tro,408
+clonebox/__main__.py,sha256=Fcoyzwwyz5-eC_sBlQk5a5RbKx8uodQz5sKJ190U0NU,135
+clonebox/cli.py,sha256=CwJ78Em2bma6jKNjrJYmM7B3rhzNDKDMLQ-3AEx00G4,98101
+clonebox/cloner.py,sha256=bSOOkfqvF1PaUhVke0GaFYz29-eA9CigiAILP-WDhjs,32221
+clonebox/container.py,sha256=tiYK1ZB-DhdD6A2FuMA0h_sRNkUI7KfYcJ0tFOcdyeM,6105
+clonebox/detector.py,sha256=4fu04Ty6KC82WkcJZ5UL5TqXpWYE7Kb7R0uJ-9dtbCk,21635
+clonebox/models.py,sha256=Uxz9eHov2epJpNYbl0ejaOX91iMSjqdHskGdC8-smVk,7789
+clonebox/validator.py,sha256=8HV3ahfiLkFDOH4UOmZr7-fGfhKep1Jlw1joJeWSaQE,15858
+clonebox-0.1.16.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+clonebox-0.1.16.dist-info/METADATA,sha256=ZT06tRqwiEyYiqnrkHchd30VZgwWo0Cb6NhRrMu00_g,35220
+clonebox-0.1.16.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+clonebox-0.1.16.dist-info/entry_points.txt,sha256=FES95Vi3btfViLEEoHdb8nikNxTqzaooi9ehZw9ZfWI,47
+clonebox-0.1.16.dist-info/top_level.txt,sha256=LdMo2cvCrEcRGH2M8JgQNVsCoszLV0xug6kx1JnaRjo,9
+clonebox-0.1.16.dist-info/RECORD,,
clonebox-0.1.15.dist-info/RECORD REMOVED
@@ -1,13 +0,0 @@
-clonebox/__init__.py,sha256=C1J7Uwrp8H9Zopo5JgrQYzXg-PWls1JdqmE_0Qp1Tro,408
-clonebox/__main__.py,sha256=Fcoyzwwyz5-eC_sBlQk5a5RbKx8uodQz5sKJ190U0NU,135
-clonebox/cli.py,sha256=xo7PJx9XODx9dfIbmuikDncTIGtpU3aAW3-S4iCxv-s,86697
-clonebox/cloner.py,sha256=fVfphsPbsqW4ASnv4bkrDIL8Ks9aPUvxx-IOO_d2FTw,32102
-clonebox/detector.py,sha256=4fu04Ty6KC82WkcJZ5UL5TqXpWYE7Kb7R0uJ-9dtbCk,21635
-clonebox/models.py,sha256=l3z1gm4TAIKzikUrQQn9yfxI62vrQRuHQxV1uftY0fY,5260
-clonebox/validator.py,sha256=8HV3ahfiLkFDOH4UOmZr7-fGfhKep1Jlw1joJeWSaQE,15858
-clonebox-0.1.15.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-clonebox-0.1.15.dist-info/METADATA,sha256=Tg6u-MfJXaO2MrdlsnFFw584tGPrVINffw6ydx2OrH4,35220
-clonebox-0.1.15.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-clonebox-0.1.15.dist-info/entry_points.txt,sha256=FES95Vi3btfViLEEoHdb8nikNxTqzaooi9ehZw9ZfWI,47
-clonebox-0.1.15.dist-info/top_level.txt,sha256=LdMo2cvCrEcRGH2M8JgQNVsCoszLV0xug6kx1JnaRjo,9
-clonebox-0.1.15.dist-info/RECORD,,