clonebox-0.1.26-py3-none-any.whl → clonebox-0.1.27-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clonebox/cli.py +391 -230
- clonebox/cloner.py +335 -206
- clonebox/dashboard.py +4 -4
- clonebox/detector.py +19 -31
- clonebox/models.py +19 -2
- clonebox/profiles.py +1 -5
- clonebox/validator.py +275 -145
- {clonebox-0.1.26.dist-info → clonebox-0.1.27.dist-info}/METADATA +1 -1
- clonebox-0.1.27.dist-info/RECORD +17 -0
- clonebox-0.1.26.dist-info/RECORD +0 -17
- {clonebox-0.1.26.dist-info → clonebox-0.1.27.dist-info}/WHEEL +0 -0
- {clonebox-0.1.26.dist-info → clonebox-0.1.27.dist-info}/entry_points.txt +0 -0
- {clonebox-0.1.26.dist-info → clonebox-0.1.27.dist-info}/licenses/LICENSE +0 -0
- {clonebox-0.1.26.dist-info → clonebox-0.1.27.dist-info}/top_level.txt +0 -0
clonebox/cli.py
CHANGED
@@ -248,7 +248,12 @@ def run_vm_diagnostics(
     # Fallback: try to get IP via QEMU Guest Agent (useful for slirp/user networking)
     if guest_agent_ready:
         try:
-            ip_out = _qga_exec(
+            ip_out = _qga_exec(
+                vm_name,
+                conn_uri,
+                "ip -4 -o addr show scope global | awk '{print $4}'",
+                timeout=5,
+            )
             if ip_out and ip_out.strip():
                 console.print(f"[green]IP (via QGA): {ip_out.strip()}[/]")
                 result["network"]["qga_ip"] = ip_out.strip()
@@ -306,27 +311,39 @@ def run_vm_diagnostics(
             result["qga"]["ping_stdout"] = ping_attempt.stdout.strip()
             result["qga"]["ping_stderr"] = ping_attempt.stderr.strip()
             if ping_attempt.stderr.strip():
-                console.print(
+                console.print(
+                    f"[dim]qemu-agent-command stderr: {ping_attempt.stderr.strip()}[/]"
+                )
         except Exception as e:
             result["qga"]["ping_error"] = str(e)

-        console.print(
-
+        console.print(
+            "[dim]If channel is present, the agent inside VM may not be running yet.[/]"
+        )
+        console.print(
+            "[dim]Inside VM try: sudo systemctl status qemu-guest-agent && sudo systemctl restart qemu-guest-agent[/]"
+        )

     console.print("\n[bold]☁️ Checking cloud-init status...[/]")
     cloud_init_complete = False
     if not guest_agent_ready:
         result["cloud_init"] = {"status": "unknown", "reason": "qga_not_ready"}
-        console.print(
+        console.print(
+            "[yellow]⏳ Cloud-init status: Unknown (QEMU guest agent not connected yet)[/]"
+        )
     else:
-        ready_msg = _qga_exec(
+        ready_msg = _qga_exec(
+            vm_name, conn_uri, "cat /var/log/clonebox-ready 2>/dev/null || true", timeout=10
+        )
         result["cloud_init"]["clonebox_ready_file"] = ready_msg
         if ready_msg and "CloneBox VM ready" in ready_msg:
             cloud_init_complete = True
             result["cloud_init"]["status"] = "complete"
             console.print("[green]✅ Cloud-init: Complete[/]")
         else:
-            ci_status = _qga_exec(
+            ci_status = _qga_exec(
+                vm_name, conn_uri, "cloud-init status 2>/dev/null || true", timeout=10
+            )
             result["cloud_init"]["cloud_init_status"] = ci_status
             result["cloud_init"]["status"] = "running"
             console.print("[yellow]⏳ Cloud-init: Still running[/]")
@@ -373,10 +390,14 @@ def run_vm_diagnostics(
         file_count: str = "?"

         if is_mounted:
-            test_out = _qga_exec(
+            test_out = _qga_exec(
+                vm_name, conn_uri, f"test -d {guest_path} && echo yes || echo no", timeout=5
+            )
             accessible = test_out == "yes"
             if accessible:
-                count_str = _qga_exec(
+                count_str = _qga_exec(
+                    vm_name, conn_uri, f"ls -A {guest_path} 2>/dev/null | wc -l", timeout=5
+                )
                 if count_str and count_str.strip().isdigit():
                     file_count = count_str.strip()

@@ -386,7 +407,11 @@ def run_vm_diagnostics(
         mount_table.add_row(
             guest_path,
             "[green]✅[/]" if is_mounted else "[red]❌[/]",
-
+            (
+                "[green]✅[/]"
+                if accessible
+                else ("[red]❌[/]" if is_mounted else "[dim]N/A[/]")
+            ),
             file_count,
         )
         mounts_detail.append(
@@ -410,7 +435,9 @@ def run_vm_diagnostics(
         result["health"]["status"] = "unknown"
         console.print("[dim]Health status: Not available yet (QEMU guest agent not ready)[/]")
     else:
-        health_status = _qga_exec(
+        health_status = _qga_exec(
+            vm_name, conn_uri, "cat /var/log/clonebox-health-status 2>/dev/null || true", timeout=10
+        )
         result["health"]["raw"] = health_status
         if health_status and "HEALTH_STATUS=OK" in health_status:
             result["health"]["status"] = "ok"
@@ -452,13 +479,27 @@ def cmd_watch(args):
     time.sleep(min(refresh, 2.0))

     if not _qga_ping(vm_name, conn_uri):
-        console.print(
-
+        console.print(
+            "[yellow]⚠️ QEMU Guest Agent not connected - cannot watch diagnostic status yet[/]"
+        )
+        console.print(
+            f"[dim]Try: clonebox status {name or vm_name} {'--user' if user_session else ''} --verbose[/]"
+        )
         return

     def _read_status() -> Tuple[Optional[Dict[str, Any]], str]:
-        status_raw = _qga_exec(
-
+        status_raw = _qga_exec(
+            vm_name, conn_uri, "cat /var/run/clonebox-status.json 2>/dev/null || true", timeout=10
+        )
+        log_tail = (
+            _qga_exec(
+                vm_name,
+                conn_uri,
+                "tail -n 40 /var/log/clonebox-boot.log 2>/dev/null || true",
+                timeout=10,
+            )
+            or ""
+        )

         status_obj: Optional[Dict[str, Any]] = None
         if status_raw:
@@ -480,12 +521,14 @@ def cmd_watch(args):

         stats = ""
         if status_obj:
-            stats = (
-                f"passed={status_obj.get('passed', 0)} failed={status_obj.get('failed', 0)} repaired={status_obj.get('repaired', 0)} total={status_obj.get('total', 0)}"
-            )
+            stats = f"passed={status_obj.get('passed', 0)} failed={status_obj.get('failed', 0)} repaired={status_obj.get('repaired', 0)} total={status_obj.get('total', 0)}"

         body = "\n".join([s for s in [header, stats, "", log_tail.strip()] if s])
-        live.update(
+        live.update(
+            Panel(
+                body or "(no output yet)", title="CloneBox boot diagnostic", border_style="cyan"
+            )
+        )

         if phase == "complete":
             break
@@ -515,9 +558,13 @@ def cmd_repair(args):
         return

    console.print(f"[cyan]🔧 Running boot diagnostic/repair in VM: {vm_name}[/]")
-    out = _qga_exec(
+    out = _qga_exec(
+        vm_name, conn_uri, "/usr/local/bin/clonebox-boot-diagnostic || true", timeout=timeout
+    )
    if out is None:
-        console.print(
+        console.print(
+            "[yellow]⚠️ Repair triggered but output not available via QGA (check VM console/log)[/]"
+        )
    elif out.strip():
        console.print(Panel(out.strip()[-3000:], title="Command output", border_style="cyan"))

@@ -894,7 +941,9 @@ def cmd_start(args):

        # Create new VM from config
        console.print(f"[cyan]Creating VM '{vm_name}' from config...[/]\n")
-        vm_uuid = create_vm_from_config(
+        vm_uuid = create_vm_from_config(
+            config, start=True, user_session=getattr(args, "user", False)
+        )
        console.print(f"\n[bold green]🎉 VM '{vm_name}' is running![/]")
        console.print(f"[dim]UUID: {vm_uuid}[/]")

@@ -927,11 +976,11 @@ def cmd_start(args):
def cmd_open(args):
    """Open VM viewer window."""
    import subprocess
-
+
    name = args.name
    user_session = getattr(args, "user", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"
-
+
    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
@@ -948,18 +997,22 @@ def cmd_open(args):
            config = load_clonebox_config(config_file)
            name = config["vm"]["name"]
        else:
-            console.print(
+            console.print(
+                "[red]❌ No VM name specified and no .clonebox.yaml in current directory[/]"
+            )
            console.print("[dim]Usage: clonebox open <vm-name> or clonebox open .[/]")
            return
-
+
    # Check if VM is running
    try:
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "domstate", name],
-            capture_output=True,
+            capture_output=True,
+            text=True,
+            timeout=10,
        )
        state = result.stdout.strip()
-
+
        if state != "running":
            console.print(f"[yellow]⚠️ VM '{name}' is not running (state: {state})[/]")
            if questionary.confirm(
@@ -973,14 +1026,11 @@ def cmd_open(args):
    except Exception as e:
        console.print(f"[red]❌ Error checking VM state: {e}[/]")
        return
-
+
    # Open virt-viewer
    console.print(f"[cyan]Opening viewer for VM: {name}[/]")
    try:
-        subprocess.run(
-            ["virt-viewer", "--connect", conn_uri, name],
-            check=True
-        )
+        subprocess.run(["virt-viewer", "--connect", conn_uri, name], check=True)
    except FileNotFoundError:
        console.print("[red]❌ virt-viewer not found[/]")
        console.print("Install with: sudo apt install virt-viewer")
@@ -991,7 +1041,7 @@ def cmd_open(args):
def cmd_stop(args):
    """Stop a VM."""
    name = args.name
-
+
    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
@@ -1002,7 +1052,7 @@ def cmd_stop(args):
        else:
            console.print(f"[red]❌ Config not found: {config_file}[/]")
            return
-
+
    cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
    cloner.stop_vm(name, force=args.force, console=console)

@@ -1010,7 +1060,7 @@ def cmd_stop(args):
def cmd_restart(args):
    """Restart a VM (stop and start)."""
    name = args.name
-
+
    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
@@ -1021,20 +1071,20 @@ def cmd_restart(args):
        else:
            console.print(f"[red]❌ Config not found: {config_file}[/]")
            return
-
+
    cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
-
+
    # Stop the VM
    console.print("[bold yellow]🔄 Stopping VM...[/]")
    cloner.stop_vm(name, force=args.force, console=console)
-
+
    # Wait a moment
    time.sleep(2)
-
+
    # Start the VM
    console.print("[bold green]🚀 Starting VM...[/]")
    cloner.start_vm(name, wait_for_agent=True, console=console)
-
+
    console.print("[bold green]✅ VM restarted successfully![/]")
    if getattr(args, "open", False):
        cloner.open_gui(name, console=console)
@@ -1043,7 +1093,7 @@ def cmd_restart(args):
def cmd_delete(args):
    """Delete a VM."""
    name = args.name
-
+
    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
@@ -1054,7 +1104,7 @@ def cmd_delete(args):
        else:
            console.print(f"[red]❌ Config not found: {config_file}[/]")
            return
-
+
    if not args.yes:
        if not questionary.confirm(
            f"Delete VM '{name}' and its storage?", default=False, style=custom_style
@@ -1230,16 +1280,26 @@ def cmd_status(args):
        console.print(" [cyan]sudo cloud-init status[/] # Cloud-init status")
        console.print(" [cyan]clonebox-health[/] # Re-run health check")
        console.print(" [dim]On host:[/]")
-        console.print(
-
+        console.print(
+            " [cyan]clonebox test . --user --validate[/] # Full validation (mounts/packages/services)"
+        )
+
    # Run full health check if requested
    if getattr(args, "health", False):
        console.print("\n[bold]🔄 Running full health check...[/]")
        try:
            result = subprocess.run(
-                [
-
-
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}',
+                ],
+                capture_output=True,
+                text=True,
+                timeout=60,
            )
            console.print("[green]Health check triggered. View results with:[/]")
            console.print(f" [cyan]virsh --connect {conn_uri} console {vm_name}[/]")
@@ -1253,13 +1313,13 @@ def cmd_export(args):
    import subprocess
    import tarfile
    import shutil
-
+
    name = args.name
    user_session = getattr(args, "user", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"
    include_data = getattr(args, "include_data", False)
    output = getattr(args, "output", None)
-
+
    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
@@ -1279,112 +1339,117 @@ def cmd_export(args):
            console.print("[red]❌ No .clonebox.yaml found in current directory[/]")
            console.print("[dim]Usage: clonebox export . or clonebox export <vm-name>[/]")
            return
-
+
    console.print(f"[bold cyan]📦 Exporting VM: {name}[/]\n")
-
+
    # Get actual disk location from virsh
    try:
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "domblklist", name, "--details"],
-            capture_output=True,
+            capture_output=True,
+            text=True,
+            timeout=10,
        )
        if result.returncode != 0:
            console.print(f"[red]❌ VM '{name}' not found[/]")
            return
-
+
        # Parse disk paths from output
        disk_path = None
        cloudinit_path = None
-        for line in result.stdout.split(
-            if
+        for line in result.stdout.split("\n"):
+            if "disk" in line and ".qcow2" in line:
                parts = line.split()
                if len(parts) >= 4:
                    disk_path = Path(parts[3])
-            elif
+            elif "cdrom" in line or ".iso" in line:
                parts = line.split()
                if len(parts) >= 4:
                    cloudinit_path = Path(parts[3])
-
+
        if not disk_path or not disk_path.exists():
            console.print(f"[red]❌ VM disk not found[/]")
            return
-
+
        console.print(f"[dim]Disk location: {disk_path}[/]")
-
+
    except Exception as e:
        console.print(f"[red]❌ Error getting VM disk: {e}[/]")
        return
-
+
    # Create export directory
    export_name = output or f"{name}-export.tar.gz"
    if not export_name.endswith(".tar.gz"):
        export_name += ".tar.gz"
-
+
    export_path = Path(export_name).resolve()
    temp_dir = Path(f"/tmp/clonebox-export-{name}")
-
+
    try:
        # Clean up temp dir if exists
        if temp_dir.exists():
            shutil.rmtree(temp_dir)
        temp_dir.mkdir(parents=True)
-
+
        # Stop VM if running
        console.print("[cyan]Stopping VM for export...[/]")
        subprocess.run(
-            ["virsh", "--connect", conn_uri, "shutdown", name],
-            capture_output=True, timeout=30
+            ["virsh", "--connect", conn_uri, "shutdown", name], capture_output=True, timeout=30
        )
        import time
+
        time.sleep(5)
        subprocess.run(
-            ["virsh", "--connect", conn_uri, "destroy", name],
-            capture_output=True, timeout=10
+            ["virsh", "--connect", conn_uri, "destroy", name], capture_output=True, timeout=10
        )
-
+
        # Export VM XML
        console.print("[cyan]Exporting VM definition...[/]")
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "dumpxml", name],
-            capture_output=True,
+            capture_output=True,
+            text=True,
+            timeout=30,
        )
        (temp_dir / "vm.xml").write_text(result.stdout)
-
+
        # Copy disk image
        console.print("[cyan]Copying disk image (this may take a while)...[/]")
        if disk_path and disk_path.exists():
            shutil.copy2(disk_path, temp_dir / "disk.qcow2")
-            console.print(
+            console.print(
+                f"[green]✅ Disk copied: {disk_path.stat().st_size / (1024**3):.2f} GB[/]"
+            )
        else:
            console.print("[yellow]⚠️ Disk image not found[/]")
-
+
        # Copy cloud-init ISO
        if cloudinit_path and cloudinit_path.exists():
            shutil.copy2(cloudinit_path, temp_dir / "cloud-init.iso")
            console.print("[green]✅ Cloud-init ISO copied[/]")
-
+
        # Copy config file
        config_file = Path.cwd() / ".clonebox.yaml"
        if config_file.exists():
            shutil.copy2(config_file, temp_dir / ".clonebox.yaml")
-
+
        # Copy .env file (without sensitive data warning)
        env_file = Path.cwd() / ".env"
        if env_file.exists():
            shutil.copy2(env_file, temp_dir / ".env")
-
+
        # Include shared data if requested
        if include_data:
            console.print("[cyan]Bundling shared data (browser profiles, configs)...[/]")
            data_dir = temp_dir / "data"
            data_dir.mkdir()
-
+
            # Load config to get paths
            if config_file.exists():
                config = load_clonebox_config(config_file)
                all_paths = config.get("paths", {}).copy()
                all_paths.update(config.get("app_data_paths", {}))
-
+
                for idx, (host_path, guest_path) in enumerate(all_paths.items()):
                    host_p = Path(host_path)
                    if host_p.exists():
@@ -1392,41 +1457,45 @@ def cmd_export(args):
                        console.print(f" [dim]Copying {host_path}...[/]")
                        try:
                            if host_p.is_dir():
-                                shutil.copytree(
-
+                                shutil.copytree(
+                                    host_p,
+                                    dest,
+                                    symlinks=True,
+                                    ignore=shutil.ignore_patterns("*.pyc", "__pycache__", ".git"),
+                                )
                            else:
                                shutil.copy2(host_p, dest)
                        except Exception as e:
                            console.print(f" [yellow]⚠️ Skipped {host_path}: {e}[/]")
-
+
            # Save path mapping
            import json
+
            (data_dir / "paths.json").write_text(json.dumps(all_paths, indent=2))
-
+
        # Create tarball
        console.print(f"[cyan]Creating archive: {export_path}[/]")
        with tarfile.open(export_path, "w:gz") as tar:
            tar.add(temp_dir, arcname=name)
-
+
        # Get size
        size_mb = export_path.stat().st_size / 1024 / 1024
-
+
        console.print(f"\n[bold green]✅ Export complete![/]")
        console.print(f" File: [cyan]{export_path}[/]")
        console.print(f" Size: [cyan]{size_mb:.1f} MB[/]")
        console.print(f"\n[bold]To import on another workstation:[/]")
        console.print(f" [cyan]clonebox import {export_path.name}[/]")
-
+
    finally:
        # Cleanup
        if temp_dir.exists():
            shutil.rmtree(temp_dir)
-
+
        # Restart VM
        console.print("\n[cyan]Restarting VM...[/]")
        subprocess.run(
-            ["virsh", "--connect", conn_uri, "start", name],
-            capture_output=True, timeout=30
+            ["virsh", "--connect", conn_uri, "start", name], capture_output=True, timeout=30
        )


@@ -1435,89 +1504,92 @@ def cmd_import(args):
    import subprocess
    import tarfile
    import shutil
-
+
    archive_path = Path(args.archive).resolve()
    user_session = getattr(args, "user", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"
-
+
    if not archive_path.exists():
        console.print(f"[red]❌ Archive not found: {archive_path}[/]")
        return
-
+
    console.print(f"[bold cyan]📥 Importing VM from: {archive_path}[/]\n")
-
+
    # Determine storage path
    if user_session:
        storage_base = Path.home() / ".local/share/libvirt/images"
    else:
        storage_base = Path("/var/lib/libvirt/images")
-
+
    storage_base.mkdir(parents=True, exist_ok=True)
-
+
    temp_dir = Path(f"/tmp/clonebox-import-{archive_path.stem}")
-
+
    try:
        # Extract archive
        console.print("[cyan]Extracting archive...[/]")
        if temp_dir.exists():
            shutil.rmtree(temp_dir)
        temp_dir.mkdir(parents=True)
-
+
        with tarfile.open(archive_path, "r:gz") as tar:
            tar.extractall(temp_dir)
-
+
        # Find extracted VM directory
        vm_dirs = list(temp_dir.iterdir())
        if not vm_dirs:
            console.print("[red]❌ Empty archive[/]")
            return
-
+
        extracted_dir = vm_dirs[0]
        vm_name = extracted_dir.name
-
+
        console.print(f"[cyan]VM Name: {vm_name}[/]")
-
+
        # Create VM storage directory
        vm_storage = storage_base / vm_name
        if vm_storage.exists():
            if not getattr(args, "replace", False):
-                console.print(
+                console.print(
+                    f"[red]❌ VM '{vm_name}' already exists. Use --replace to overwrite.[/]"
+                )
                return
            shutil.rmtree(vm_storage)
-
+
        vm_storage.mkdir(parents=True)
-
+
        # Copy disk image
        console.print("[cyan]Copying disk image...[/]")
        disk_src = extracted_dir / "disk.qcow2"
        if disk_src.exists():
            shutil.copy2(disk_src, vm_storage / f"{vm_name}.qcow2")
-
+
        # Copy cloud-init ISO
        cloudinit_src = extracted_dir / "cloud-init.iso"
        if cloudinit_src.exists():
            shutil.copy2(cloudinit_src, vm_storage / "cloud-init.iso")
-
+
        # Copy config files to current directory
        config_src = extracted_dir / ".clonebox.yaml"
        if config_src.exists():
            shutil.copy2(config_src, Path.cwd() / ".clonebox.yaml")
            console.print("[green]✅ Copied .clonebox.yaml[/]")
-
+
        env_src = extracted_dir / ".env"
        if env_src.exists():
            shutil.copy2(env_src, Path.cwd() / ".env")
            console.print("[green]✅ Copied .env[/]")
-
+
        # Restore data if included
        data_dir = extracted_dir / "data"
        if data_dir.exists():
            import json
+
            paths_file = data_dir / "paths.json"
            if paths_file.exists():
                paths_mapping = json.loads(paths_file.read_text())
                console.print("\n[cyan]Restoring shared data...[/]")
-
+
                for idx, (host_path, guest_path) in enumerate(paths_mapping.items()):
                    src = data_dir / f"mount{idx}"
                    if src.exists():
@@ -1534,38 +1606,38 @@ def cmd_import(args):
                            shutil.copy2(src, dest)
                        except Exception as e:
                            console.print(f" [yellow]⚠️ Error: {e}[/]")
-
+
        # Modify and define VM XML
        console.print("\n[cyan]Defining VM...[/]")
        xml_src = extracted_dir / "vm.xml"
        if xml_src.exists():
            xml_content = xml_src.read_text()
-
+
            # Update paths in XML to new storage location
            # This is a simple replacement - may need more sophisticated handling
-            xml_content = xml_content.replace(
-
-            )
-
+            xml_content = xml_content.replace(f"/home/", f"{Path.home()}/")
+
            # Write modified XML
            modified_xml = temp_dir / "vm-modified.xml"
            modified_xml.write_text(xml_content)
-
+
            # Define VM
            result = subprocess.run(
                ["virsh", "--connect", conn_uri, "define", str(modified_xml)],
-                capture_output=True,
+                capture_output=True,
+                text=True,
+                timeout=30,
            )
-
+
            if result.returncode == 0:
                console.print(f"[green]✅ VM '{vm_name}' defined successfully![/]")
            else:
                console.print(f"[yellow]⚠️ VM definition warning: {result.stderr}[/]")
-
+
        console.print(f"\n[bold green]✅ Import complete![/]")
        console.print(f"\n[bold]To start the VM:[/]")
        console.print(f" [cyan]clonebox start . {'--user' if user_session else ''}[/]")
-
+
    finally:
        # Cleanup
        if temp_dir.exists():
@@ -1577,7 +1649,7 @@ def cmd_test(args):
    import subprocess
    import json
    from clonebox.validator import VMValidator
-
+
    name = args.name
    user_session = getattr(args, "user", False)
    quick = getattr(args, "quick", False)
@@ -1586,7 +1658,7 @@ def cmd_test(args):
    require_running_apps = getattr(args, "require_running_apps", False)
    smoke_test = getattr(args, "smoke_test", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"
-
+
    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
@@ -1599,9 +1671,9 @@ def cmd_test(args):
        if not config_file.exists():
            console.print("[red]❌ No .clonebox.yaml found in current directory[/]")
            return
-
+
    console.print(f"[bold cyan]🧪 Testing VM configuration: {config_file}[/]\n")
-
+
    # Load config
    try:
        config = load_clonebox_config(config_file)
@@ -1614,21 +1686,23 @@ def cmd_test(args):
    except Exception as e:
        console.print(f"[red]❌ Failed to load config: {e}[/]")
        return
-
+
    console.print()
-
+
    # Test 1: Check VM exists
    console.print("[bold]1. VM Existence Check[/]")
    try:
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "dominfo", vm_name],
-            capture_output=True,
+            capture_output=True,
+            text=True,
+            timeout=10,
        )
        if result.returncode == 0:
            console.print("[green]✅ VM is defined in libvirt[/]")
            if verbose:
-                for line in result.stdout.split(
-                    if
+                for line in result.stdout.split("\n"):
+                    if ":" in line:
                        console.print(f" {line}")
        else:
            console.print("[red]❌ VM not found in libvirt[/]")
@@ -1637,32 +1711,36 @@ def cmd_test(args):
    except Exception as e:
        console.print(f"[red]❌ Error checking VM: {e}[/]")
        return
-
+
    console.print()
-
+
    # Test 2: Check VM state
    console.print("[bold]2. VM State Check[/]")
    try:
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "domstate", vm_name],
-            capture_output=True,
+            capture_output=True,
+            text=True,
+            timeout=10,
        )
        state = result.stdout.strip()
        if state == "running":
            console.print("[green]✅ VM is running[/]")
-
+
            # Test network if running
            console.print("\n Checking network...")
            try:
                result = subprocess.run(
                    ["virsh", "--connect", conn_uri, "domifaddr", vm_name],
-                    capture_output=True,
+                    capture_output=True,
+                    text=True,
+                    timeout=10,
                )
                if "192.168" in result.stdout or "10.0" in result.stdout:
                    console.print("[green]✅ VM has network access[/]")
                    if verbose:
-                        for line in result.stdout.split(
-                            if
+                        for line in result.stdout.split("\n"):
+                            if "192.168" in line or "10.0" in line:
                                console.print(f" IP: {line.split()[-1]}")
                else:
                    console.print("[yellow]⚠️ No IP address detected via virsh domifaddr[/]")
@@ -1673,9 +1751,16 @@ def cmd_test(args):
                    from clonebox.cli import _qga_ping, _qga_exec
                    if _qga_ping(vm_name, conn_uri):
                        try:
-                            ip_out = _qga_exec(
+                            ip_out = _qga_exec(
+                                vm_name,
+                                conn_uri,
+                                "ip -4 -o addr show scope global | awk '{print $4}'",
+                                timeout=5,
+                            )
                            if ip_out and ip_out.strip():
-                                console.print(
+                                console.print(
+                                    f"[green]✅ VM has network access (IP via QGA: {ip_out.strip()})[/]"
+                                )
                            else:
                                console.print("[yellow]⚠️ IP not available via QGA[/]")
                        except Exception as e:
@@ -1689,18 +1774,26 @@ def cmd_test(args):
            console.print(" Run: clonebox start .")
    except Exception as e:
        console.print(f"[red]❌ Error checking VM state: {e}[/]")
-
+
    console.print()
-
+
    # Test 3: Check cloud-init status (if running)
    if not quick and state == "running":
        console.print("[bold]3. Cloud-init Status[/]")
        try:
            # Try to get cloud-init status via QEMU guest agent
            result = subprocess.run(
-                [
-
-
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    '{"execute":"guest-exec","arguments":{"path":"cloud-init","arg":["status"],"capture-output":true}}',
+                ],
+                capture_output=True,
+                text=True,
+                timeout=15,
            )
            if result.returncode == 0:
                try:
@@ -1709,9 +1802,17 @@ def cmd_test(args):
                    pid = response["return"]["pid"]
                    # Get output
                    result2 = subprocess.run(
-                        [
-
-
+                        [
+                            "virsh",
+                            "--connect",
+                            conn_uri,
+                            "qemu-agent-command",
+                            vm_name,
+                            f'{{"execute":"guest-exec-status","arguments":{"pid":{pid}}}}',
+                        ],
+                        capture_output=True,
+                        text=True,
+                        timeout=15,
                    )
                    if result2.returncode == 0:
                        resp2 = json.loads(result2.stdout)
@@ -1719,31 +1820,38 @@ def cmd_test(args):
                        output = resp2["return"]["out-data"]
                        if output:
                            import base64
+
                            status = base64.b64decode(output).decode()
                            if "done" in status.lower():
                                console.print("[green]✅ Cloud-init completed[/]")
                            elif "running" in status.lower():
                                console.print("[yellow]⚠️ Cloud-init still running[/]")
                            else:
-                                console.print(
+                                console.print(
+                                    f"[yellow]⚠️ Cloud-init status: {status.strip()}[/]"
+                                )
                except:
                    pass
        except:
-            console.print(
-
+            console.print(
+                "[yellow]⚠️ Could not check cloud-init (QEMU agent may not be running)[/]"
+            )
+
        console.print()
-
+
    # Test 4: Check mounts (if running)
    if not quick and state == "running":
        console.print("[bold]4. Mount Points Check[/]")
        all_paths = config.get("paths", {}).copy()
        all_paths.update(config.get("app_data_paths", {}))
-
+
        if all_paths:
            for idx, (host_path, guest_path) in enumerate(all_paths.items()):
                try:
                    # Use the same QGA helper as diagnose/status
-                    is_accessible = _qga_exec(
+                    is_accessible = _qga_exec(
+                        vm_name, conn_uri, f"test -d {guest_path} && echo yes || echo no", timeout=5
+                    )
                    if is_accessible == "yes":
                        console.print(f"[green]✅ {guest_path}[/]")
                    else:
@@ -1752,17 +1860,25 @@ def cmd_test(args):
                    console.print(f"[yellow]⚠️ {guest_path} (could not check)[/]")
        else:
            console.print("[dim]No mount points configured[/]")
-
+
        console.print()
-
+
    # Test 5: Run health check (if running and not quick)
    if not quick and state == "running":
        console.print("[bold]5. Health Check[/]")
        try:
            result = subprocess.run(
-                [
-
-
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}',
+                ],
+                capture_output=True,
+                text=True,
+                timeout=60,
            )
            if result.returncode == 0:
                console.print("[green]✅ Health check triggered[/]")
@@ -1772,9 +1888,9 @@ def cmd_test(args):
                console.print(" VM may not have been created with health checks")
        except Exception as e:
            console.print(f"[yellow]⚠️ Could not run health check: {e}[/]")
-
+
        console.print()
-
+
    # Run full validation if requested
    if validate_all and state == "running":
        validator = VMValidator(
@@ -1786,7 +1902,7 @@ def cmd_test(args):
            smoke_test=smoke_test,
        )
        results = validator.validate_all()
-
+
        # Exit with error code if validations failed
        if results["overall"] == "partial":
            return 1
@@ -1798,7 +1914,7 @@ def cmd_test(args):
    console.print("[dim] clonebox test . --user --validate[/]")
    console.print("\n[dim]For detailed health report, run in VM:[/]")
    console.print("[dim] cat /var/log/clonebox-health.log[/]")
-
+
    return 0


@@ -1811,16 +1927,16 @@ def load_env_file(env_path: Path) -> dict:
    env_vars = {}
    if not env_path.exists():
        return env_vars
-
+
    with open(env_path) as f:
        for line in f:
            line = line.strip()
-            if not line or line.startswith(
+            if not line or line.startswith("#"):
                continue
-            if
-                key, value = line.split(
+            if "=" in line:
+                key, value = line.split("=", 1)
                env_vars[key.strip()] = value.strip()
-
+
    return env_vars


@@ -1831,7 +1947,8 @@ def expand_env_vars(value, env_vars: dict):
        def replacer(match):
            var_name = match.group(1)
            return env_vars.get(var_name, os.environ.get(var_name, match.group(0)))
-
+
+        return re.sub(r"\$\{([^}]+)\}", replacer, value)
    elif isinstance(value, dict):
        return {k: expand_env_vars(v, env_vars) for k, v in value.items()}
    elif isinstance(value, list):
@@ -1866,12 +1983,27 @@ def generate_clonebox_yaml(

    # Services that should NOT be cloned to VM (host-specific)
    VM_EXCLUDED_SERVICES = {
-        "libvirtd",
-        "
-        "
-        "
-        "
-        "
+        "libvirtd",
+        "virtlogd",
+        "libvirt-guests",
+        "qemu-guest-agent",
+        "bluetooth",
+        "bluez",
+        "upower",
+        "thermald",
+        "tlp",
+        "power-profiles-daemon",
+        "gdm",
+        "gdm3",
+        "sddm",
+        "lightdm",
+        "snap.cups.cups-browsed",
+        "snap.cups.cupsd",
+        "ModemManager",
+        "wpa_supplicant",
+        "accounts-daemon",
+        "colord",
+        "switcheroo-control",
    }

    # Collect services (excluding host-specific ones)
@@ -1971,7 +2103,7 @@ def generate_clonebox_yaml(
    # Auto-detect packages from running applications and services
    app_packages = detector.suggest_packages_for_apps(snapshot.applications)
    service_packages = detector.suggest_packages_for_services(snapshot.running_services)
-
+
    # Combine with base packages (apt only)
    base_packages = [
        "build-essential",
@@ -1979,12 +2111,12 @@ def generate_clonebox_yaml(
        "curl",
        "vim",
    ]
-
+
    # Merge apt packages and deduplicate
    all_apt_packages = base_packages + app_packages["apt"] + service_packages["apt"]
    if deduplicate:
        all_apt_packages = deduplicate_list(all_apt_packages)
-
+
    # Merge snap packages and deduplicate
    all_snap_packages = app_packages["snap"] + service_packages["snap"]
    if deduplicate:
@@ -2000,7 +2132,9 @@ def generate_clonebox_yaml(
        if guest_path == "/home/ubuntu/.config/JetBrains":
            remapped[host_path] = "/home/ubuntu/snap/pycharm-community/common/.config/JetBrains"
        elif guest_path == "/home/ubuntu/.local/share/JetBrains":
-            remapped[host_path] =
+            remapped[host_path] = (
+                "/home/ubuntu/snap/pycharm-community/common/.local/share/JetBrains"
+            )
        elif guest_path == "/home/ubuntu/.cache/JetBrains":
            remapped[host_path] = "/home/ubuntu/snap/pycharm-community/common/.cache/JetBrains"
        else:
@@ -2074,10 +2208,10 @@ def load_clonebox_config(path: Path) -> dict:
    # Load YAML config
    with open(config_file) as f:
        config = yaml.safe_load(f)
-
+
    # Expand environment variables in config
    config = expand_env_vars(config, env_vars)
-
+
    return config


@@ -2085,42 +2219,45 @@ def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout:
    """Monitor cloud-init status in VM and show progress."""
    import subprocess
    import time
-
+
    conn_uri = "qemu:///session" if user_session else "qemu:///system"
    start_time = time.time()
    shutdown_count = 0  # Count consecutive shutdown detections
    restart_detected = False
-
+
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        task = progress.add_task("[cyan]Starting VM and initializing...", total=None)
-
+
        while time.time() - start_time < timeout:
            try:
                elapsed = int(time.time() - start_time)
                minutes = elapsed // 60
                seconds = elapsed % 60
-
+
                # Check VM state
                result = subprocess.run(
                    ["virsh", "--connect", conn_uri, "domstate", vm_name],
                    capture_output=True,
                    text=True,
-                    timeout=5
+                    timeout=5,
                )
-
+
                vm_state = result.stdout.strip().lower()
-
+
                if "shut off" in vm_state or "shutting down" in vm_state:
                    # VM is shutting down - count consecutive detections
                    shutdown_count += 1
                    if shutdown_count >= 3 and not restart_detected:
                        # Confirmed shutdown after 3 consecutive checks
                        restart_detected = True
-                        progress.update(
+                        progress.update(
+                            task,
+                            description="[yellow]⟳ VM restarting after package installation...",
+                        )
                        time.sleep(3)
                        continue
                else:
@@ -2128,13 +2265,15 @@ def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout:
                    if shutdown_count > 0 and shutdown_count < 3:
                        # Was a brief glitch, not a real shutdown
                        shutdown_count = 0
-
+
                if restart_detected and "running" in vm_state and shutdown_count >= 3:
                    # VM restarted successfully - GUI should be ready
-                    progress.update(
+                    progress.update(
+                        task, description=f"[green]✓ GUI ready! Total time: {minutes}m {seconds}s"
+                    )
                    time.sleep(2)
                    break
-
+
                # Estimate remaining time (total ~12-15 minutes for full desktop install)
                if elapsed < 60:
                    remaining = "~12-15 minutes"
@@ -2146,23 +2285,33 @@ def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout:
                    remaining = "finishing soon..."
                else:
                    remaining = "almost done"
-
+
                if restart_detected:
-                    progress.update(
+                    progress.update(
+                        task,
+                        description=f"[cyan]Starting GUI... ({minutes}m {seconds}s, {remaining})",
+                    )
                else:
-                    progress.update(
-
+                    progress.update(
+                        task,
+                        description=f"[cyan]Installing desktop packages... ({minutes}m {seconds}s, {remaining})",
+                    )
+
            except (subprocess.TimeoutExpired, Exception) as e:
                elapsed = int(time.time() - start_time)
                minutes = elapsed // 60
                seconds = elapsed % 60
-                progress.update(
-
+                progress.update(
+                    task, description=f"[cyan]Configuring VM... ({minutes}m {seconds}s)"
+                )
+
            time.sleep(3)
-
        # Final status
        if time.time() - start_time >= timeout:
-            progress.update(
+            progress.update(
+                task, description="[yellow]⚠ Monitoring timeout - VM continues in background"
+            )


def create_vm_from_config(
@@ -2175,7 +2324,7 @@ def create_vm_from_config(
    # Merge paths and app_data_paths
    all_paths = config.get("paths", {}).copy()
    all_paths.update(config.get("app_data_paths", {}))
-
+
    vm_config = VMConfig(
        name=config["vm"]["name"],
        ram_mb=config["vm"].get("ram_mb", 4096),
@@ -2211,7 +2360,7 @@ def create_vm_from_config(

    if start:
        cloner.start_vm(vm_config.name, open_viewer=vm_config.gui, console=console)
-
+
        # Monitor cloud-init progress if GUI is enabled
        if vm_config.gui:
            console.print("\n[bold cyan]📊 Monitoring setup progress...[/]")
@@ -2220,7 +2369,9 @@ def create_vm_from_config(
            except KeyboardInterrupt:
                console.print("\n[yellow]Monitoring stopped. VM continues setup in background.[/]")
            except Exception as e:
-                console.print(
+                console.print(
+                    f"\n[dim]Note: Could not monitor status ({e}). VM continues setup in background.[/]"
+                )

    return vm_uuid

@@ -2292,17 +2443,19 @@ def cmd_clone(args):
    # Dry run - show what would be created and exit
    if dry_run:
        config = yaml.safe_load(yaml_content)
-        console.print(
-
-
-
-
-
-
-
-
-
-
+        console.print(
+            Panel(
+                f"[bold]VM Name:[/] {config['vm']['name']}\n"
+                f"[bold]RAM:[/] {config['vm'].get('ram_mb', 4096)} MB\n"
+                f"[bold]vCPUs:[/] {config['vm'].get('vcpus', 4)}\n"
+                f"[bold]Network:[/] {config['vm'].get('network_mode', 'auto')}\n"
+                f"[bold]Paths:[/] {len(config.get('paths', {}))} mounts\n"
+                f"[bold]Packages:[/] {len(config.get('packages', []))} packages\n"
+                f"[bold]Services:[/] {len(config.get('services', []))} services",
+                title="[bold cyan]Would create VM[/]",
+                border_style="cyan",
+            )
+        )
        console.print("\n[dim]Config preview:[/]")
        console.print(Panel(yaml_content, title="[bold].clonebox.yaml[/]", border_style="dim"))
        console.print("\n[yellow]ℹ️ Dry run complete. No changes made.[/]")
@@ -2357,14 +2510,16 @@ def cmd_clone(args):

    # Show GUI startup info if GUI is enabled
    if config.get("vm", {}).get("gui", False):
-        username = config[
-        password = config[
+        username = config["vm"].get("username", "ubuntu")
+        password = config["vm"].get("password", "ubuntu")
        console.print("\n[bold yellow]⏰ GUI Setup Process:[/]")
        console.print(" [yellow]•[/] Installing desktop environment (~5-10 minutes)")
        console.print(" [yellow]•[/] Running health checks on all components")
        console.print(" [yellow]•[/] Automatic restart after installation")
        console.print(" [yellow]•[/] GUI login screen will appear")
-        console.print(
+        console.print(
+            f" [yellow]•[/] Login: [cyan]{username}[/] / [cyan]{'*' * len(password)}[/] (from .env)"
+        )
        console.print("\n[dim]💡 Progress will be monitored automatically below[/]")

    # Show health check info
@@ -2544,7 +2699,9 @@ def main():

    # Stop command
    stop_parser = subparsers.add_parser("stop", help="Stop a VM")
-    stop_parser.add_argument(
+    stop_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
    stop_parser.add_argument("--force", "-f", action="store_true", help="Force stop")
    stop_parser.add_argument(
        "-u",
@@ -2556,7 +2713,9 @@ def main():

    # Restart command
    restart_parser = subparsers.add_parser("restart", help="Restart a VM (stop and start)")
-    restart_parser.add_argument(
+    restart_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
    restart_parser.add_argument(
        "-f",
        "--force",
@@ -2578,7 +2737,9 @@ def main():

    # Delete command
    delete_parser = subparsers.add_parser("delete", help="Delete a VM")
-    delete_parser.add_argument(
+    delete_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
    delete_parser.add_argument("--yes", "-y", action="store_true", help="Skip confirmation")
    delete_parser.add_argument("--keep-storage", action="store_true", help="Keep disk images")
    delete_parser.add_argument(
@@ -2609,7 +2770,9 @@ def main():
        help="Container engine: auto (default), podman, docker",
    )
    container_parser.set_defaults(func=lambda args, p=container_parser: p.print_help())
-    container_sub = container_parser.add_subparsers(
+    container_sub = container_parser.add_subparsers(
+        dest="container_command", help="Container commands"
+    )

    container_up = container_sub.add_parser("up", help="Start container")
    container_up.add_argument(
@@ -2695,7 +2858,9 @@ def main():

    # Dashboard command
    dashboard_parser = subparsers.add_parser("dashboard", help="Run local dashboard")
-    dashboard_parser.add_argument(
+    dashboard_parser.add_argument(
+        "--port", type=int, default=8080, help="Port to bind (default: 8080)"
+    )
    dashboard_parser.set_defaults(func=cmd_dashboard)

    # Detect command
@@ -2770,9 +2935,7 @@ def main():
        action="store_true",
        help="Use user session (qemu:///session)",
    )
-    status_parser.add_argument(
-        "--health", "-H", action="store_true", help="Run full health check"
-    )
+    status_parser.add_argument("--health", "-H", action="store_true", help="Run full health check")
    status_parser.add_argument(
        "--verbose", "-v", action="store_true", help="Show detailed diagnostics (QGA, stderr, etc.)"
    )
@@ -2794,9 +2957,7 @@ def main():
    diagnose_parser.add_argument(
        "--verbose", "-v", action="store_true", help="Show more low-level details"
    )
-    diagnose_parser.add_argument(
-        "--json", action="store_true", help="Print diagnostics as JSON"
-    )
+    diagnose_parser.add_argument("--json", action="store_true", help="Print diagnostics as JSON")
    diagnose_parser.set_defaults(func=cmd_diagnose)

    watch_parser = subparsers.add_parser(
@@ -2868,8 +3029,10 @@ def main():
        "-o", "--output", help="Output archive filename (default: <vmname>-export.tar.gz)"
    )
    export_parser.add_argument(
-        "--include-data",
-
+        "--include-data",
+        "-d",
+        action="store_true",
+        help="Include shared data (browser profiles, configs) in export",
    )
    export_parser.set_defaults(func=cmd_export)

@@ -2895,9 +3058,7 @@ def main():
    test_parser.add_argument(
        "--quick", action="store_true", help="Quick test (no deep health checks)"
    )
-    test_parser.add_argument(
-        "--verbose", "-v", action="store_true", help="Verbose output"
-    )
+    test_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    test_parser.add_argument(
        "--validate", action="store_true", help="Run full validation (mounts, packages, services)"
    )