clonebox-0.1.15-py3-none-any.whl → clonebox-0.1.17-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clonebox/cli.py CHANGED
@@ -58,6 +58,361 @@ def print_banner():
     console.print(f" Version {__version__}\n", style="dim")
 
 
+def _resolve_vm_name_and_config_file(name: Optional[str]) -> tuple[str, Optional[Path]]:
+    config_file: Optional[Path] = None
+
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            return config["vm"]["name"], config_file
+        raise FileNotFoundError(f"Config not found: {config_file}")
+
+    if not name:
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            return config["vm"]["name"], config_file
+        raise FileNotFoundError("No VM name specified and no .clonebox.yaml found")
+
+    return name, None
+
+
+def _qga_ping(vm_name: str, conn_uri: str) -> bool:
+    def _qga_ping() -> bool:
+        try:
+            result = subprocess.run(
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    json.dumps({"execute": "guest-ping"}),
+                ],
+                capture_output=True,
+                text=True,
+                timeout=5,
+            )
+            return result.returncode == 0
+        except Exception:
+            return False
+
+    return _qga_ping()
+
+
+def _qga_exec(vm_name: str, conn_uri: str, command: str, timeout: int = 10) -> Optional[str]:
+    def _qga_exec() -> Optional[str]:
+        try:
+            payload = {
+                "execute": "guest-exec",
+                "arguments": {
+                    "path": "/bin/sh",
+                    "arg": ["-c", command],
+                    "capture-output": True,
+                },
+            }
+            exec_result = subprocess.run(
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    json.dumps(payload),
+                ],
+                capture_output=True,
+                text=True,
+                timeout=timeout,
+            )
+            if exec_result.returncode != 0:
+                return None
+
+            resp = json.loads(exec_result.stdout)
+            pid = resp.get("return", {}).get("pid")
+            if not pid:
+                return None
+
+            import base64
+            import time
+
+            deadline = time.time() + timeout
+            while time.time() < deadline:
+                status_payload = {"execute": "guest-exec-status", "arguments": {"pid": pid}}
+                status_result = subprocess.run(
+                    [
+                        "virsh",
+                        "--connect",
+                        conn_uri,
+                        "qemu-agent-command",
+                        vm_name,
+                        json.dumps(status_payload),
+                    ],
+                    capture_output=True,
+                    text=True,
+                    timeout=5,
+                )
+                if status_result.returncode != 0:
+                    return None
+
+                status_resp = json.loads(status_result.stdout)
+                ret = status_resp.get("return", {})
+                if not ret.get("exited", False):
+                    time.sleep(0.3)
+                    continue
+
+                out_data = ret.get("out-data")
+                if out_data:
+                    return base64.b64decode(out_data).decode().strip()
+                return ""
+
+            return None
+        except Exception:
+            return None
+
+    return _qga_exec()
+
+
+def run_vm_diagnostics(
+    vm_name: str,
+    conn_uri: str,
+    config_file: Optional[Path],
+    *,
+    verbose: bool = False,
+    json_output: bool = False,
+) -> dict:
+    import subprocess
+
+    result: dict = {
+        "vm": {"name": vm_name, "conn_uri": conn_uri},
+        "state": {},
+        "network": {},
+        "qga": {},
+        "cloud_init": {},
+        "mounts": {},
+        "health": {},
+    }
+
+    console.print(f"[bold cyan]🧪 Diagnostics: {vm_name}[/]\n")
+
+    try:
+        domstate = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domstate", vm_name],
+            capture_output=True,
+            text=True,
+            timeout=5,
+        )
+        result["state"] = {
+            "returncode": domstate.returncode,
+            "stdout": domstate.stdout.strip(),
+            "stderr": domstate.stderr.strip(),
+        }
+        if domstate.returncode == 0 and domstate.stdout.strip():
+            console.print(f"[green]✅ VM State: {domstate.stdout.strip()}[/]")
+        else:
+            console.print("[red]❌ VM State: unable to read[/]")
+            if verbose and domstate.stderr.strip():
+                console.print(f"[dim]{domstate.stderr.strip()}[/]")
+    except subprocess.TimeoutExpired:
+        result["state"] = {"error": "timeout"}
+        console.print("[red]❌ VM State: timeout[/]")
+        if json_output:
+            console.print_json(json.dumps(result))
+        return result
+
+    console.print("\n[bold]🔍 Checking VM network...[/]")
+    try:
+        domifaddr = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domifaddr", vm_name],
+            capture_output=True,
+            text=True,
+            timeout=10,
+        )
+        result["network"] = {
+            "returncode": domifaddr.returncode,
+            "stdout": domifaddr.stdout.strip(),
+            "stderr": domifaddr.stderr.strip(),
+        }
+        if domifaddr.stdout.strip():
+            console.print(f"[dim]{domifaddr.stdout.strip()}[/]")
+        else:
+            console.print("[yellow]⚠️ No interface address detected yet[/]")
+            if verbose and domifaddr.stderr.strip():
+                console.print(f"[dim]{domifaddr.stderr.strip()}[/]")
+    except Exception as e:
+        result["network"] = {"error": str(e)}
+        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")
+
+    guest_agent_ready = _qga_ping(vm_name, conn_uri)
+    result["qga"]["ready"] = guest_agent_ready
+    if verbose:
+        console.print("\n[bold]🤖 QEMU Guest Agent...[/]")
+        console.print(f"{'[green]✅' if guest_agent_ready else '[red]❌'} QGA connected")
+
+    if not guest_agent_ready:
+        try:
+            dumpxml = subprocess.run(
+                ["virsh", "--connect", conn_uri, "dumpxml", vm_name],
+                capture_output=True,
+                text=True,
+                timeout=10,
+            )
+            has_qga_channel = False
+            if dumpxml.returncode == 0:
+                has_qga_channel = "org.qemu.guest_agent.0" in dumpxml.stdout
+            result["qga"]["dumpxml_returncode"] = dumpxml.returncode
+            result["qga"]["has_channel"] = has_qga_channel
+            if dumpxml.stderr.strip():
+                result["qga"]["dumpxml_stderr"] = dumpxml.stderr.strip()
+
+            console.print(
+                f"[dim]Guest agent channel in VM XML: {'present' if has_qga_channel else 'missing'}[/]"
+            )
+        except Exception as e:
+            result["qga"]["dumpxml_error"] = str(e)
+
+        try:
+            ping_attempt = subprocess.run(
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    json.dumps({"execute": "guest-ping"}),
+                ],
+                capture_output=True,
+                text=True,
+                timeout=10,
+            )
+            result["qga"]["ping_returncode"] = ping_attempt.returncode
+            result["qga"]["ping_stdout"] = ping_attempt.stdout.strip()
+            result["qga"]["ping_stderr"] = ping_attempt.stderr.strip()
+            if ping_attempt.stderr.strip():
+                console.print(f"[dim]qemu-agent-command stderr: {ping_attempt.stderr.strip()}[/]")
+        except Exception as e:
+            result["qga"]["ping_error"] = str(e)
+
+        console.print("[dim]If channel is present, the agent inside VM may not be running yet.[/]")
+        console.print("[dim]Inside VM try: sudo systemctl status qemu-guest-agent && sudo systemctl restart qemu-guest-agent[/]")
+
+    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
+    cloud_init_complete = False
+    if not guest_agent_ready:
+        result["cloud_init"] = {"status": "unknown", "reason": "qga_not_ready"}
+        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU guest agent not connected yet)[/]")
+    else:
+        ready_msg = _qga_exec(vm_name, conn_uri, "cat /var/log/clonebox-ready 2>/dev/null || true", timeout=10)
+        result["cloud_init"]["clonebox_ready_file"] = ready_msg
+        if ready_msg and "CloneBox VM ready" in ready_msg:
+            cloud_init_complete = True
+            result["cloud_init"]["status"] = "complete"
+            console.print("[green]✅ Cloud-init: Complete[/]")
+        else:
+            ci_status = _qga_exec(vm_name, conn_uri, "cloud-init status 2>/dev/null || true", timeout=10)
+            result["cloud_init"]["cloud_init_status"] = ci_status
+            result["cloud_init"]["status"] = "running"
+            console.print("[yellow]⏳ Cloud-init: Still running[/]")
+            if verbose and ci_status:
+                console.print(f"[dim]{ci_status}[/]")
+
+    console.print("\n[bold]💾 Checking mount status...[/]")
+    if not cloud_init_complete:
+        console.print("[dim]Mounts may not be ready until cloud-init completes.[/]")
+
+    mounts_detail: list[dict] = []
+    result["mounts"]["details"] = mounts_detail
+    if not guest_agent_ready:
+        console.print("[yellow]⏳ QEMU guest agent not connected yet - cannot verify mounts.[/]")
+        result["mounts"]["status"] = "unknown"
+    else:
+        if not config_file:
+            config_file = Path.cwd() / ".clonebox.yaml"
+
+        if not config_file.exists():
+            console.print("[dim]No .clonebox.yaml found - cannot check mounts[/]")
+            result["mounts"]["status"] = "no_config"
+        else:
+            config = load_clonebox_config(config_file)
+            all_paths = config.get("paths", {}).copy()
+            all_paths.update(config.get("app_data_paths", {}))
+            result["mounts"]["expected"] = list(all_paths.values())
+            mount_output = _qga_exec(vm_name, conn_uri, "mount | grep 9p || true", timeout=10) or ""
+            mounted_paths = [line.split()[2] for line in mount_output.split("\n") if line.strip()]
+            result["mounts"]["mounted_paths"] = mounted_paths
+
+            mount_table = Table(title="Mount Points", border_style="cyan", show_header=True)
+            mount_table.add_column("Guest Path", style="bold")
+            mount_table.add_column("Mounted", justify="center")
+            mount_table.add_column("Accessible", justify="center")
+            mount_table.add_column("Files", justify="right")
+
+            working_mounts = 0
+            total_mounts = 0
+            for _, guest_path in all_paths.items():
+                total_mounts += 1
+                is_mounted = any(guest_path == mp or guest_path in mp for mp in mounted_paths)
+                accessible = False
+                file_count: str = "?"
+
+                if is_mounted:
+                    test_out = _qga_exec(vm_name, conn_uri, f"test -d {guest_path} && echo yes || echo no", timeout=5)
+                    accessible = test_out == "yes"
+                    if accessible:
+                        count_str = _qga_exec(vm_name, conn_uri, f"ls -A {guest_path} 2>/dev/null | wc -l", timeout=5)
+                        if count_str and count_str.strip().isdigit():
+                            file_count = count_str.strip()
+
+                if is_mounted and accessible:
+                    working_mounts += 1
+
+                mount_table.add_row(
+                    guest_path,
+                    "[green]✅[/]" if is_mounted else "[red]❌[/]",
+                    "[green]✅[/]" if accessible else ("[red]❌[/]" if is_mounted else "[dim]N/A[/]"),
+                    file_count,
+                )
+                mounts_detail.append(
+                    {
+                        "guest_path": guest_path,
+                        "mounted": is_mounted,
+                        "accessible": accessible,
+                        "files": file_count,
+                    }
+                )
+
+            result["mounts"]["working"] = working_mounts
+            result["mounts"]["total"] = total_mounts
+            result["mounts"]["status"] = "ok" if working_mounts == total_mounts else "partial"
+
+            console.print(mount_table)
+            console.print(f"[dim]{working_mounts}/{total_mounts} mounts working[/]")
+
+    console.print("\n[bold]🏥 Health Check Status...[/]")
+    if not guest_agent_ready:
+        result["health"]["status"] = "unknown"
+        console.print("[dim]Health status: Not available yet (QEMU guest agent not ready)[/]")
+    else:
+        health_status = _qga_exec(vm_name, conn_uri, "cat /var/log/clonebox-health-status 2>/dev/null || true", timeout=10)
+        result["health"]["raw"] = health_status
+        if health_status and "HEALTH_STATUS=OK" in health_status:
+            result["health"]["status"] = "ok"
+            console.print("[green]✅ Health: All checks passed[/]")
+        elif health_status and "HEALTH_STATUS=FAILED" in health_status:
+            result["health"]["status"] = "failed"
+            console.print("[red]❌ Health: Some checks failed[/]")
+        else:
+            result["health"]["status"] = "not_run"
+            console.print("[yellow]⏳ Health check not yet run[/]")
+            if verbose and health_status:
+                console.print(f"[dim]{health_status}[/]")
+
+    if json_output:
+        console.print_json(json.dumps(result))
+    return result
+
+
 def interactive_mode():
     """Run the interactive VM creation wizard."""
     print_banner()
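Taken together, these helpers give the host a path into the guest without SSH: resolve the VM name from a path or a `.clonebox.yaml`, confirm the QEMU guest agent answers `guest-ping`, then run shell commands through `guest-exec`, which is polled via `guest-exec-status` until the process exits and its base64-encoded stdout can be decoded. A minimal sketch of how the new helpers compose; it assumes `clonebox.cli` is importable, that a `.clonebox.yaml` exists in the current directory, and the command run is purely illustrative:

    from clonebox.cli import _qga_exec, _qga_ping, _resolve_vm_name_and_config_file

    # "." resolves through ./.clonebox.yaml; a bare VM name would pass through unchanged.
    vm_name, config_file = _resolve_vm_name_and_config_file(".")

    conn_uri = "qemu:///session"  # user-session libvirt, as selected by --user
    if _qga_ping(vm_name, conn_uri):
        # Runs via /bin/sh -c inside the guest, polls guest-exec-status,
        # then returns the decoded, stripped stdout (or None on failure).
        print(_qga_exec(vm_name, conn_uri, "uptime -p", timeout=10))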
@@ -345,14 +700,13 @@ def interactive_mode():
         if questionary.confirm("Start VM now?", default=True, style=custom_style).ask():
             cloner.start_vm(vm_name, open_viewer=enable_gui, console=console)
             console.print("\n[bold green]🎉 VM is running![/]")
+            console.print(f"\n[dim]UUID: {vm_uuid}[/]")
 
         if paths_mapping:
             console.print("\n[bold]Inside the VM, mount shared folders with:[/]")
             for idx, (host, guest) in enumerate(paths_mapping.items()):
                 console.print(f" [cyan]sudo mount -t 9p -o trans=virtio mount{idx} {guest}[/]")
 
-        console.print(f"\n[dim]VM UUID: {vm_uuid}[/]")
-
     except Exception as e:
         console.print(f"\n[red]❌ Error: {e}[/]")
         raise
@@ -579,239 +933,177 @@ def cmd_list(args):
     console.print(table)
 
 
+def cmd_container_up(args):
+    """Start a container sandbox."""
+    try:
+        from clonebox.container import ContainerCloner
+        from clonebox.models import ContainerConfig
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    mounts = {}
+    for m in getattr(args, "mount", []) or []:
+        if ":" not in m:
+            raise ValueError(f"Invalid mount: {m} (expected HOST:CONTAINER)")
+        host, container_path = m.split(":", 1)
+        mounts[host] = container_path
+
+    cfg_kwargs: dict = {
+        "engine": getattr(args, "engine", "auto"),
+        "image": getattr(args, "image", "ubuntu:22.04"),
+        "workspace": Path(getattr(args, "path", ".")),
+        "extra_mounts": mounts,
+        "env_from_dotenv": not getattr(args, "no_dotenv", False),
+        "packages": getattr(args, "package", []) or [],
+        "ports": getattr(args, "port", []) or [],
+    }
+    if getattr(args, "name", None):
+        cfg_kwargs["name"] = args.name
+
+    cfg = ContainerConfig(**cfg_kwargs)
+
+    cloner = ContainerCloner(engine=cfg.engine)
+    cloner.up(cfg, detach=getattr(args, "detach", False))
+
+
+def cmd_container_ps(args):
+    """List containers."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    items = cloner.ps(all=getattr(args, "all", False))
+
+    if getattr(args, "json", False):
+        print(json.dumps(items, indent=2))
+        return
+
+    if not items:
+        console.print("[dim]No containers found.[/]")
+        return
+
+    table = Table(title="Containers", border_style="cyan")
+    table.add_column("Name", style="bold")
+    table.add_column("Image")
+    table.add_column("Status")
+    table.add_column("Ports")
+
+    for c in items:
+        table.add_row(
+            str(c.get("name", "")),
+            str(c.get("image", "")),
+            str(c.get("status", "")),
+            str(c.get("ports", "")),
+        )
+
+    console.print(table)
+
+
+def cmd_container_stop(args):
+    """Stop a container."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    cloner.stop(args.name)
+
+
+def cmd_container_rm(args):
+    """Remove a container."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    cloner.rm(args.name, force=getattr(args, "force", False))
+
+
+def cmd_container_down(args):
+    """Stop and remove a container."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    cloner.stop(args.name)
+    cloner.rm(args.name, force=True)
+
+
+def cmd_diagnose(args):
+    """Run detailed VM diagnostics (standalone)."""
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    try:
+        vm_name, config_file = _resolve_vm_name_and_config_file(name)
+    except FileNotFoundError as e:
+        console.print(f"[red]❌ {e}[/]")
+        return
+
+    run_vm_diagnostics(
+        vm_name,
+        conn_uri,
+        config_file,
+        verbose=getattr(args, "verbose", False),
+        json_output=getattr(args, "json", False),
+    )
+
+
 def cmd_status(args):
     """Check VM installation status and health from workstation."""
     import subprocess
-
+
     name = args.name
     user_session = getattr(args, "user", False)
     conn_uri = "qemu:///session" if user_session else "qemu:///system"
-
-    # If name is a path, load config to get VM name
-    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
-        target_path = Path(name).expanduser().resolve()
-        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
-        if config_file.exists():
-            config = load_clonebox_config(config_file)
-            name = config["vm"]["name"]
-        else:
-            console.print(f"[red]❌ Config not found: {config_file}[/]")
-            return
-
-    if not name:
-        # Try current directory
-        config_file = Path.cwd() / ".clonebox.yaml"
-        if config_file.exists():
-            config = load_clonebox_config(config_file)
-            name = config["vm"]["name"]
-        else:
-            console.print("[red]❌ No VM name specified and no .clonebox.yaml found[/]")
-            return
-
-    console.print(f"[bold cyan]📊 Checking VM status: {name}[/]\n")
-
-    # Check VM state
+
     try:
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "domstate", name],
-            capture_output=True, text=True, timeout=5
-        )
-        vm_state = result.stdout.strip()
-
-        if "running" in vm_state.lower():
-            console.print(f"[green]✅ VM State: {vm_state}[/]")
-        elif "shut off" in vm_state.lower():
-            console.print(f"[yellow]⏸️ VM State: {vm_state}[/]")
-            console.print("[dim]Start with: clonebox start .[/]")
-            return
-        else:
-            console.print(f"[dim]VM State: {vm_state}[/]")
-    except subprocess.TimeoutExpired:
-        console.print("[red]❌ Timeout checking VM state[/]")
-        return
-    except Exception as e:
-        console.print(f"[red]❌ Error: {e}[/]")
+        vm_name, config_file = _resolve_vm_name_and_config_file(name)
+    except FileNotFoundError as e:
+        console.print(f"[red]❌ {e}[/]")
         return
-
-    # Get VM IP address
-    console.print("\n[bold]🔍 Checking VM network...[/]")
-    try:
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "domifaddr", name],
-            capture_output=True, text=True, timeout=10
-        )
-        if result.stdout.strip():
-            console.print(f"[dim]{result.stdout.strip()}[/]")
-            # Extract IP
-            for line in result.stdout.split('\n'):
-                if 'ipv4' in line.lower():
-                    parts = line.split()
-                    for p in parts:
-                        if '/' in p and '.' in p:
-                            ip = p.split('/')[0]
-                            console.print(f"[green]IP Address: {ip}[/]")
-                            break
-        else:
-            console.print("[yellow]⚠️ No IP address yet (VM may still be booting)[/]")
-    except Exception as e:
-        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")
-
-    # Check cloud-init status via console
-    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
-    try:
-        # Use virsh console to check - this is tricky, so we check for the ready file
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-ready"],"capture-output":true}}'],
-            capture_output=True, text=True, timeout=10
-        )
-        if "CloneBox VM ready" in result.stdout or result.returncode == 0:
-            console.print("[green]✅ Cloud-init: Complete[/]")
-        else:
-            console.print("[yellow]⏳ Cloud-init: Still running (packages installing)[/]")
-    except Exception:
-        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU agent may not be ready)[/]")
-
-    # Check mount status
-    console.print("\n[bold]💾 Checking mount status...[/]")
-    try:
-        # Load config to get expected mounts
-        config_file = Path.cwd() / ".clonebox.yaml"
-        if config_file.exists():
-            config = load_clonebox_config(config_file)
-            all_paths = config.get("paths", {}).copy()
-            all_paths.update(config.get("app_data_paths", {}))
-
-            if all_paths:
-                # Check which mounts are active
-                result = subprocess.run(
-                    ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                     '{"execute":"guest-exec","arguments":{"path":"/bin/sh","arg":["-c","mount | grep 9p"],"capture-output":true}}'],
-                    capture_output=True, text=True, timeout=10
-                )
-
-                mount_table = Table(title="Mount Points", border_style="cyan", show_header=True)
-                mount_table.add_column("Guest Path", style="bold")
-                mount_table.add_column("Status", justify="center")
-                mount_table.add_column("Files", justify="right")
-
-                mounted_paths = []
-                if result.returncode == 0 and "return" in result.stdout:
-                    # Parse guest-exec response for mount output
-                    import json
-                    try:
-                        resp = json.loads(result.stdout)
-                        if "return" in resp and "pid" in resp["return"]:
-                            # Get the output from guest-exec-status
-                            pid = resp["return"]["pid"]
-                            status_result = subprocess.run(
-                                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                                 f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
-                                capture_output=True, text=True, timeout=5
-                            )
-                            if status_result.returncode == 0:
-                                status_resp = json.loads(status_result.stdout)
-                                if "return" in status_resp and "out-data" in status_resp["return"]:
-                                    import base64
-                                    mount_output = base64.b64decode(status_resp["return"]["out-data"]).decode()
-                                    mounted_paths = [line.split()[2] for line in mount_output.split('\n') if line.strip()]
-                    except:
-                        pass
-
-                # Check each expected mount
-                working_mounts = 0
-                total_mounts = 0
-                for host_path, guest_path in all_paths.items():
-                    total_mounts += 1
-                    is_mounted = any(guest_path in mp for mp in mounted_paths)
-
-                    # Try to get file count
-                    file_count = "?"
-                    if is_mounted:
-                        try:
-                            count_result = subprocess.run(
-                                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                                 f'{{"execute":"guest-exec","arguments":{{"path":"/bin/sh","arg":["-c","ls -A {guest_path} 2>/dev/null | wc -l"],"capture-output":true}}}}'],
-                                capture_output=True, text=True, timeout=5
-                            )
-                            if count_result.returncode == 0:
-                                resp = json.loads(count_result.stdout)
-                                if "return" in resp and "pid" in resp["return"]:
-                                    pid = resp["return"]["pid"]
-                                    import time
-                                    time.sleep(0.5)
-                                    status_result = subprocess.run(
-                                        ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                                         f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
-                                        capture_output=True, text=True, timeout=5
-                                    )
-                                    if status_result.returncode == 0:
-                                        status_resp = json.loads(status_result.stdout)
-                                        if "return" in status_resp and "out-data" in status_resp["return"]:
-                                            file_count = base64.b64decode(status_resp["return"]["out-data"]).decode().strip()
-                        except:
-                            pass
-
-                    if is_mounted:
-                        status = "[green]✅ Mounted[/]"
-                        working_mounts += 1
-                    else:
-                        status = "[red]❌ Not mounted[/]"
-
-                    mount_table.add_row(guest_path, status, str(file_count))
-
-                console.print(mount_table)
-                console.print(f"[dim]{working_mounts}/{total_mounts} mounts active[/]")
-
-                if working_mounts < total_mounts:
-                    console.print("[yellow]⚠️ Some mounts are missing. Try remounting in VM:[/]")
-                    console.print("[dim] sudo mount -a[/]")
-                    console.print("[dim]Or rebuild VM with: clonebox clone . --user --run --replace[/]")
-            else:
-                console.print("[dim]No mount points configured[/]")
-        else:
-            console.print("[dim]No .clonebox.yaml found - cannot check mounts[/]")
-    except Exception as e:
-        console.print(f"[yellow]⚠️ Cannot check mounts: {e}[/]")
-        console.print("[dim]QEMU guest agent may not be ready yet[/]")
-
-    # Check health status if available
-    console.print("\n[bold]🏥 Health Check Status...[/]")
-    try:
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-health-status"],"capture-output":true}}'],
-            capture_output=True, text=True, timeout=10
-        )
-        if "HEALTH_STATUS=OK" in result.stdout:
-            console.print("[green]✅ Health: All checks passed[/]")
-        elif "HEALTH_STATUS=FAILED" in result.stdout:
-            console.print("[red]❌ Health: Some checks failed[/]")
-        else:
-            console.print("[yellow]⏳ Health check not yet run[/]")
-    except Exception:
-        console.print("[dim]Health status: Not available yet[/]")
-
+
+    run_vm_diagnostics(vm_name, conn_uri, config_file, verbose=False, json_output=False)
+
     # Show useful commands
     console.print("\n[bold]📋 Useful commands:[/]")
-    console.print(f" [cyan]virt-viewer --connect {conn_uri} {name}[/] # Open GUI")
-    console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/] # Console access")
+    console.print(f" [cyan]virt-viewer --connect {conn_uri} {vm_name}[/] # Open GUI")
+    console.print(f" [cyan]virsh --connect {conn_uri} console {vm_name}[/] # Console access")
     console.print(" [dim]Inside VM:[/]")
     console.print(" [cyan]cat /var/log/clonebox-health.log[/] # Full health report")
     console.print(" [cyan]sudo cloud-init status[/] # Cloud-init status")
     console.print(" [cyan]clonebox-health[/] # Re-run health check")
+    console.print(" [dim]On host:[/]")
+    console.print(" [cyan]clonebox test . --user --validate[/] # Full validation (mounts/packages/services)")
 
     # Run full health check if requested
     if getattr(args, "health", False):
         console.print("\n[bold]🔄 Running full health check...[/]")
         try:
             result = subprocess.run(
-                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
                  '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}'],
                 capture_output=True, text=True, timeout=60
             )
             console.print("[green]Health check triggered. View results with:[/]")
-            console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/]")
+            console.print(f" [cyan]virsh --connect {conn_uri} console {vm_name}[/]")
             console.print(" Then run: [cyan]cat /var/log/clonebox-health.log[/]")
         except Exception as e:
             console.print(f"[yellow]⚠️ Could not trigger health check: {e}[/]")
@@ -2050,6 +2342,64 @@ def main():
     )
     list_parser.set_defaults(func=cmd_list)
 
+    # Container command
+    container_parser = subparsers.add_parser("container", help="Manage container sandboxes")
+    container_parser.add_argument(
+        "--engine",
+        choices=["auto", "podman", "docker"],
+        default="auto",
+        help="Container engine: auto (default), podman, docker",
+    )
+    container_sub = container_parser.add_subparsers(dest="container_command", help="Container commands")
+
+    container_up = container_sub.add_parser("up", help="Start container")
+    container_up.add_argument("path", nargs="?", default=".", help="Workspace path")
+    container_up.add_argument("--name", help="Container name")
+    container_up.add_argument("--image", default="ubuntu:22.04", help="Container image")
+    container_up.add_argument("--detach", action="store_true", help="Run container in background")
+    container_up.add_argument(
+        "--mount",
+        action="append",
+        default=[],
+        help="Extra mount HOST:CONTAINER (repeatable)",
+    )
+    container_up.add_argument(
+        "--port",
+        action="append",
+        default=[],
+        help="Port mapping (e.g. 8080:80) (repeatable)",
+    )
+    container_up.add_argument(
+        "--package",
+        action="append",
+        default=[],
+        help="APT package to install in image (repeatable)",
+    )
+    container_up.add_argument(
+        "--no-dotenv",
+        action="store_true",
+        help="Do not load env vars from workspace .env",
+    )
+    container_up.set_defaults(func=cmd_container_up)
+
+    container_ps = container_sub.add_parser("ps", aliases=["ls"], help="List containers")
+    container_ps.add_argument("-a", "--all", action="store_true", help="Show all containers")
+    container_ps.add_argument("--json", action="store_true", help="Output JSON")
+    container_ps.set_defaults(func=cmd_container_ps)
+
+    container_stop = container_sub.add_parser("stop", help="Stop container")
+    container_stop.add_argument("name", help="Container name")
+    container_stop.set_defaults(func=cmd_container_stop)
+
+    container_rm = container_sub.add_parser("rm", help="Remove container")
+    container_rm.add_argument("name", help="Container name")
+    container_rm.add_argument("-f", "--force", action="store_true", help="Force remove")
+    container_rm.set_defaults(func=cmd_container_rm)
+
+    container_down = container_sub.add_parser("down", help="Stop and remove container")
+    container_down.add_argument("name", help="Container name")
+    container_down.set_defaults(func=cmd_container_down)
+
     # Detect command
     detect_parser = subparsers.add_parser("detect", help="Detect system state")
     detect_parser.add_argument("--json", action="store_true", help="Output as JSON")
@@ -2117,6 +2467,27 @@ def main():
     )
     status_parser.set_defaults(func=cmd_status)
 
+    # Diagnose command - detailed diagnostics from workstation
+    diagnose_parser = subparsers.add_parser(
+        "diagnose", aliases=["diag"], help="Run detailed VM diagnostics"
+    )
+    diagnose_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    diagnose_parser.add_argument(
+        "-u",
+        "--user",
+        action="store_true",
+        help="Use user session (qemu:///session)",
+    )
+    diagnose_parser.add_argument(
+        "--verbose", "-v", action="store_true", help="Show more low-level details"
+    )
+    diagnose_parser.add_argument(
+        "--json", action="store_true", help="Print diagnostics as JSON"
+    )
+    diagnose_parser.set_defaults(func=cmd_diagnose)
+
     # Export command - package VM for migration
     export_parser = subparsers.add_parser("export", help="Export VM and data for migration")
     export_parser.add_argument(
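With this wiring, `clonebox status` and the new `clonebox diagnose` share `run_vm_diagnostics`, which also returns a machine-readable dict (the same structure printed with `--json`). A sketch of consuming that dict programmatically; the VM name is illustrative and `clonebox.cli` must be importable:

    from pathlib import Path

    from clonebox.cli import run_vm_diagnostics

    result = run_vm_diagnostics(
        "dev-vm",                   # hypothetical VM name
        "qemu:///session",
        Path(".clonebox.yaml"),
        verbose=False,
        json_output=False,
    )

    # Keys mirror the printed sections: vm, state, network, qga, cloud_init, mounts, health.
    if result["mounts"].get("status") == "partial":
        for m in result["mounts"]["details"]:
            if not m["mounted"]:
                print(f"missing mount: {m['guest_path']}")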
clonebox/cloner.py CHANGED
@@ -668,7 +668,7 @@ fi
             tag = f"mount{idx}"
             # Use uid=1000,gid=1000 to give ubuntu user access to mounts
             # mmap allows proper file mapping
-            mount_opts = "trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000"
+            mount_opts = "trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000,users"
             mount_commands.append(f" - mkdir -p {guest_path}")
             mount_commands.append(f" - chown 1000:1000 {guest_path}")
             mount_commands.append(
@@ -679,7 +679,7 @@ fi
 
         # User-data
         # Add desktop environment if GUI is enabled
-        base_packages = []
+        base_packages = ["qemu-guest-agent"]
         if config.gui:
             base_packages.extend([
                 "ubuntu-desktop-minimal",
@@ -693,6 +693,8 @@ fi
 
         # Build runcmd - services, mounts, snaps, post_commands
        runcmd_lines = []
+
+        runcmd_lines.append(" - systemctl enable --now qemu-guest-agent || true")
 
         # Add service enablement
         for svc in config.services:
@@ -700,9 +702,10 @@ fi
 
         # Add fstab entries for persistent mounts after reboot
         if fstab_entries:
-            runcmd_lines.append(" - echo '# CloneBox 9p mounts' >> /etc/fstab")
+            runcmd_lines.append(" - grep -q '^# CloneBox 9p mounts' /etc/fstab || echo '# CloneBox 9p mounts' >> /etc/fstab")
             for entry in fstab_entries:
-                runcmd_lines.append(f" - echo '{entry}' >> /etc/fstab")
+                runcmd_lines.append(f" - grep -qF \"{entry}\" /etc/fstab || echo '{entry}' >> /etc/fstab")
+            runcmd_lines.append(" - mount -a || true")
 
         # Add mounts (immediate, before reboot)
         for cmd in mount_commands:
@@ -740,6 +743,8 @@ fi
             runcmd_lines.append(" - sleep 10 && reboot")
 
         runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
+        bootcmd_yaml = "\n".join(mount_commands) if mount_commands else ""
+        bootcmd_block = f"\nbootcmd:\n{bootcmd_yaml}\n" if bootcmd_yaml else ""
 
         # Remove power_state - using shutdown -r instead
         power_state_yaml = ""
@@ -765,6 +770,7 @@ chpasswd:
 # Update package cache and upgrade
 package_update: true
 package_upgrade: false
+{bootcmd_block}
 
 # Install packages (cloud-init waits for completion before runcmd)
 packages:
@@ -811,8 +817,6 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
         try:
             vm = self.conn.lookupByName(vm_name)
         except libvirt.libvirtError:
-            if ignore_not_found:
-                return False
             log(f"[red]❌ VM '{vm_name}' not found[/]")
             return False
 
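The fstab change makes the generated cloud-init runcmd idempotent: the marker comment and each 9p entry are appended only when an identical line is not already present, so re-running the script against an existing VM no longer duplicates mounts, and the trailing `mount -a || true` activates them immediately. A standalone illustration of the guard pattern the generator emits; the entry string is made up:

    # Hypothetical fstab line, shaped like the 9p entries CloneBox generates.
    entry = "mount0 /workspace 9p trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000,users 0 0"

    # grep -qF matches the entry as a fixed string; the echo runs only when it is absent.
    guarded = f'grep -qF "{entry}" /etc/fstab || echo \'{entry}\' >> /etc/fstab'
    print(guarded)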
clonebox/container.py ADDED
@@ -0,0 +1,190 @@
+#!/usr/bin/env python3
+
+import json
+import shutil
+import subprocess
+import tempfile
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from clonebox.models import ContainerConfig
+
+
+class ContainerCloner:
+    def __init__(self, engine: str = "auto"):
+        self.engine = self._resolve_engine(engine)
+
+    def _resolve_engine(self, engine: str) -> str:
+        if engine == "auto":
+            return self.detect_engine()
+        if engine not in {"podman", "docker"}:
+            raise ValueError("engine must be one of: auto, podman, docker")
+        if shutil.which(engine) is None:
+            raise RuntimeError(f"Container engine not found: {engine}")
+        self._run([engine, "--version"], check=True)
+        return engine
+
+    def detect_engine(self) -> str:
+        if shutil.which("podman") is not None:
+            try:
+                self._run(["podman", "--version"], check=True)
+                return "podman"
+            except Exception:
+                pass
+
+        if shutil.which("docker") is not None:
+            try:
+                self._run(["docker", "--version"], check=True)
+                return "docker"
+            except Exception:
+                pass
+
+        raise RuntimeError("No container engine found (podman/docker)")
+
+    def _run(
+        self,
+        cmd: List[str],
+        check: bool = True,
+        capture_output: bool = True,
+        text: bool = True,
+    ) -> subprocess.CompletedProcess:
+        return subprocess.run(cmd, check=check, capture_output=capture_output, text=text)
+
+    def build_dockerfile(self, config: ContainerConfig) -> str:
+        lines: List[str] = [f"FROM {config.image}"]
+
+        if config.packages:
+            pkgs = " ".join(config.packages)
+            lines.append(
+                "RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y "
+                + pkgs
+                + " && rm -rf /var/lib/apt/lists/*"
+            )
+
+        lines.append("WORKDIR /workspace")
+        lines.append('CMD ["bash"]')
+        return "\n".join(lines) + "\n"
+
+    def build_image(self, config: ContainerConfig, tag: Optional[str] = None) -> str:
+        if tag is None:
+            tag = f"{config.name}:latest"
+
+        dockerfile = self.build_dockerfile(config)
+        workspace = Path(config.workspace).resolve()
+
+        with tempfile.NamedTemporaryFile(prefix="clonebox-dockerfile-", delete=False) as f:
+            dockerfile_path = Path(f.name)
+            f.write(dockerfile.encode())
+
+        try:
+            self._run(
+                [
+                    self.engine,
+                    "build",
+                    "-f",
+                    str(dockerfile_path),
+                    "-t",
+                    tag,
+                    str(workspace),
+                ],
+                check=True,
+            )
+        finally:
+            try:
+                dockerfile_path.unlink()
+            except Exception:
+                pass
+
+        return tag
+
+    def up(self, config: ContainerConfig, detach: bool = False, remove: bool = True) -> None:
+        engine = self._resolve_engine(config.engine if config.engine != "auto" else self.engine)
+
+        image = config.image
+        if config.packages:
+            image = self.build_image(config)
+
+        cmd: List[str] = [engine, "run"]
+        cmd.append("-d" if detach else "-it")
+
+        if remove:
+            cmd.append("--rm")
+
+        cmd.extend(["--name", config.name])
+        cmd.extend(["-w", "/workspace"])
+
+        env_file = Path(config.workspace) / ".env"
+        if config.env_from_dotenv and env_file.exists():
+            cmd.extend(["--env-file", str(env_file)])
+
+        for src, dst in config.mounts.items():
+            cmd.extend(["-v", f"{src}:{dst}"])
+
+        for p in config.ports:
+            cmd.extend(["-p", p])
+
+        cmd.append(image)
+
+        if detach:
+            cmd.extend(["sleep", "infinity"])
+        else:
+            cmd.append("bash")
+
+        subprocess.run(cmd, check=True)
+
+    def stop(self, name: str) -> None:
+        subprocess.run([self.engine, "stop", name], check=True)
+
+    def rm(self, name: str, force: bool = False) -> None:
+        cmd = [self.engine, "rm"]
+        if force:
+            cmd.append("-f")
+        cmd.append(name)
+        subprocess.run(cmd, check=True)
+
+    def ps(self, all: bool = False) -> List[Dict[str, Any]]:
+        if self.engine == "podman":
+            cmd = ["podman", "ps", "--format", "json"]
+            if all:
+                cmd.append("-a")
+            result = self._run(cmd, check=True)
+            try:
+                parsed = json.loads(result.stdout or "[]")
+            except json.JSONDecodeError:
+                return []
+
+            items: List[Dict[str, Any]] = []
+            for c in parsed:
+                name = ""
+                names = c.get("Names")
+                if isinstance(names, list) and names:
+                    name = str(names[0])
+                elif isinstance(names, str):
+                    name = names
+
+                items.append(
+                    {
+                        "name": name,
+                        "image": c.get("Image") or c.get("ImageName") or "",
+                        "status": c.get("State") or c.get("Status") or "",
+                        "ports": c.get("Ports") or [],
+                    }
+                )
+            return items
+
+        cmd = ["docker", "ps", "--format", "{{.Names}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}"]
+        if all:
+            cmd.insert(2, "-a")
+
+        result = self._run(cmd, check=True)
+        items: List[Dict[str, Any]] = []
+        for line in (result.stdout or "").splitlines():
+            if not line.strip():
+                continue
+            parts = line.split("\t")
+            name = parts[0] if len(parts) > 0 else ""
+            image = parts[1] if len(parts) > 1 else ""
+            status = parts[2] if len(parts) > 2 else ""
+            ports = parts[3] if len(parts) > 3 else ""
+            items.append({"name": name, "image": image, "status": status, "ports": ports})
+        return items
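For reference, the new `clonebox container` subcommands drive this class roughly as follows. A minimal direct-usage sketch; the workspace path and package list are illustrative, and podman or docker must be installed for engine detection to succeed:

    from pathlib import Path

    from clonebox.container import ContainerCloner
    from clonebox.models import ContainerConfig

    cfg = ContainerConfig(
        image="ubuntu:22.04",
        workspace=Path("/home/me/project"),  # hypothetical path, mounted at /workspace
        packages=["git", "curl"],            # triggers a throwaway image build first
        ports=["8080:80"],
    )

    cloner = ContainerCloner(engine="auto")  # prefers podman, falls back to docker
    cloner.up(cfg, detach=True)              # detached containers run `sleep infinity`
    cloner.stop(cfg.name)                    # with --rm, stopping also removes it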
clonebox/models.py CHANGED
@@ -4,7 +4,8 @@ Pydantic models for CloneBox configuration validation.
 """
 
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Literal, Optional
+from uuid import uuid4
 
 from pydantic import BaseModel, Field, field_validator, model_validator
 
@@ -124,5 +125,73 @@ class CloneBoxConfig(BaseModel):
     )
 
 
+class ContainerConfig(BaseModel):
+    name: str = Field(default_factory=lambda: f"clonebox-{uuid4().hex[:8]}")
+    engine: Literal["auto", "podman", "docker"] = "auto"
+    image: str = "ubuntu:22.04"
+    workspace: Path = Path(".")
+    extra_mounts: Dict[str, str] = Field(default_factory=dict)
+    env_from_dotenv: bool = True
+    packages: List[str] = Field(default_factory=list)
+    ports: List[str] = Field(default_factory=list)
+
+    @field_validator("name")
+    @classmethod
+    def name_must_be_valid(cls, v: str) -> str:
+        if not v or not v.strip():
+            raise ValueError("Container name cannot be empty")
+        if len(v) > 64:
+            raise ValueError("Container name must be <= 64 characters")
+        return v.strip()
+
+    @field_validator("extra_mounts")
+    @classmethod
+    def extra_mounts_must_be_absolute(cls, v: Dict[str, str]) -> Dict[str, str]:
+        for host_path, container_path in v.items():
+            if not str(host_path).startswith("/"):
+                raise ValueError(f"Host path must be absolute: {host_path}")
+            if not str(container_path).startswith("/"):
+                raise ValueError(f"Container path must be absolute: {container_path}")
+        return v
+
+    @field_validator("ports")
+    @classmethod
+    def ports_must_be_valid(cls, v: List[str]) -> List[str]:
+        for p in v:
+            if not isinstance(p, str) or not p.strip():
+                raise ValueError("Port mapping cannot be empty")
+            if ":" in p:
+                host, container = p.split(":", 1)
+                if not host.isdigit() or not container.isdigit():
+                    raise ValueError(f"Invalid port mapping: {p}")
+            else:
+                if not p.isdigit():
+                    raise ValueError(f"Invalid port value: {p}")
+        return v
+
+    @property
+    def mounts(self) -> Dict[str, str]:
+        mounts: Dict[str, str] = {
+            str(self.workspace.resolve()): "/workspace",
+        }
+        mounts.update(self.extra_mounts)
+        return mounts
+
+    def to_docker_run_cmd(self) -> List[str]:
+        if self.engine == "auto":
+            raise ValueError("engine must be resolved before generating run command")
+
+        cmd: List[str] = [self.engine, "run", "-it", "--rm", "--name", self.name]
+
+        for src, dst in self.mounts.items():
+            cmd.extend(["-v", f"{src}:{dst}"])
+
+        for p in self.ports:
+            cmd.extend(["-p", p])
+
+        cmd.append(self.image)
+        return cmd
+
+
 # Backwards compatibility alias
 VMConfigModel = CloneBoxConfig
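The validators fail fast on malformed input, and `to_docker_run_cmd()` shows how the model maps to an engine invocation once the engine is pinned to a concrete value. A small sketch; the mount path is illustrative:

    from clonebox.models import ContainerConfig

    cfg = ContainerConfig(
        engine="podman",                           # "auto" would raise in to_docker_run_cmd()
        extra_mounts={"/home/me/data": "/data"},   # both sides must be absolute paths
        ports=["8080:80"],                         # host and container parts must be numeric
    )

    # The resolved workspace -> /workspace mount always precedes extra_mounts.
    print(cfg.to_docker_run_cmd())
    # e.g. ['podman', 'run', '-it', '--rm', '--name', 'clonebox-…', '-v', ...]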
clonebox-0.1.15.dist-info/METADATA → clonebox-0.1.17.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clonebox
-Version: 0.1.15
+Version: 0.1.17
 Summary: Clone your workstation environment to an isolated VM with selective apps, paths and services
 Author: CloneBox Team
 License: Apache-2.0
clonebox-0.1.17.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
+clonebox/__init__.py,sha256=C1J7Uwrp8H9Zopo5JgrQYzXg-PWls1JdqmE_0Qp1Tro,408
+clonebox/__main__.py,sha256=Fcoyzwwyz5-eC_sBlQk5a5RbKx8uodQz5sKJ190U0NU,135
+clonebox/cli.py,sha256=mB2Xoz9llkHW9yIR-xRuZ94F_PmV833gdiKVKKcJQrc,98462
+clonebox/cloner.py,sha256=tgN51yeNGesolO1wfuVh-CAGkAZew7oMoCYYz_bXgBk,32456
+clonebox/container.py,sha256=tiYK1ZB-DhdD6A2FuMA0h_sRNkUI7KfYcJ0tFOcdyeM,6105
+clonebox/detector.py,sha256=4fu04Ty6KC82WkcJZ5UL5TqXpWYE7Kb7R0uJ-9dtbCk,21635
+clonebox/models.py,sha256=Uxz9eHov2epJpNYbl0ejaOX91iMSjqdHskGdC8-smVk,7789
+clonebox/validator.py,sha256=8HV3ahfiLkFDOH4UOmZr7-fGfhKep1Jlw1joJeWSaQE,15858
+clonebox-0.1.17.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+clonebox-0.1.17.dist-info/METADATA,sha256=YHJ3k1qzqhkB7hsuZVesQv5ANF6bbABLGTYSCl29p34,35220
+clonebox-0.1.17.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+clonebox-0.1.17.dist-info/entry_points.txt,sha256=FES95Vi3btfViLEEoHdb8nikNxTqzaooi9ehZw9ZfWI,47
+clonebox-0.1.17.dist-info/top_level.txt,sha256=LdMo2cvCrEcRGH2M8JgQNVsCoszLV0xug6kx1JnaRjo,9
+clonebox-0.1.17.dist-info/RECORD,,
clonebox-0.1.15.dist-info/RECORD DELETED
@@ -1,13 +0,0 @@
-clonebox/__init__.py,sha256=C1J7Uwrp8H9Zopo5JgrQYzXg-PWls1JdqmE_0Qp1Tro,408
-clonebox/__main__.py,sha256=Fcoyzwwyz5-eC_sBlQk5a5RbKx8uodQz5sKJ190U0NU,135
-clonebox/cli.py,sha256=xo7PJx9XODx9dfIbmuikDncTIGtpU3aAW3-S4iCxv-s,86697
-clonebox/cloner.py,sha256=fVfphsPbsqW4ASnv4bkrDIL8Ks9aPUvxx-IOO_d2FTw,32102
-clonebox/detector.py,sha256=4fu04Ty6KC82WkcJZ5UL5TqXpWYE7Kb7R0uJ-9dtbCk,21635
-clonebox/models.py,sha256=l3z1gm4TAIKzikUrQQn9yfxI62vrQRuHQxV1uftY0fY,5260
-clonebox/validator.py,sha256=8HV3ahfiLkFDOH4UOmZr7-fGfhKep1Jlw1joJeWSaQE,15858
-clonebox-0.1.15.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-clonebox-0.1.15.dist-info/METADATA,sha256=Tg6u-MfJXaO2MrdlsnFFw584tGPrVINffw6ydx2OrH4,35220
-clonebox-0.1.15.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-clonebox-0.1.15.dist-info/entry_points.txt,sha256=FES95Vi3btfViLEEoHdb8nikNxTqzaooi9ehZw9ZfWI,47
-clonebox-0.1.15.dist-info/top_level.txt,sha256=LdMo2cvCrEcRGH2M8JgQNVsCoszLV0xug6kx1JnaRjo,9
-clonebox-0.1.15.dist-info/RECORD,,