clonebox 1.1.17-py3-none-any.whl → 1.1.19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clonebox/cloner.py CHANGED
@@ -32,6 +32,7 @@ from clonebox.interfaces.disk import DiskManager
  from clonebox.interfaces.hypervisor import HypervisorBackend
  from clonebox.interfaces.network import NetworkManager
  from clonebox.logging import get_logger, log_operation
+ from clonebox.policies import PolicyEngine, PolicyViolationError
  from clonebox.resources import ResourceLimits
  from clonebox.rollback import vm_creation_transaction
  from clonebox.secrets import SecretsManager, SSHKeyPair
@@ -231,6 +232,13 @@ class SelectiveVMCloner:
  url=self.DEFAULT_BASE_IMAGE_URL,
  )

+ policy = PolicyEngine.load_effective()
+ if policy is not None:
+ try:
+ policy.assert_url_allowed(self.DEFAULT_BASE_IMAGE_URL)
+ except PolicyViolationError as e:
+ raise RuntimeError(str(e)) from e
+
  try:
  import urllib.request

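The new guard calls `PolicyEngine.load_effective()` and `assert_url_allowed()` before the default base image is fetched, re-raising any `PolicyViolationError` as a plain `RuntimeError`. The real API lives in `clonebox.policies` and is not shown in this diff; the sketch below is only a hypothetical illustration of the allowlist pattern it implies (class names and the `allowed_prefixes` attribute are assumptions).

```python
# Hypothetical sketch of the URL-gating pattern; the real clonebox.policies
# API is not shown in this diff and may differ.
class UrlPolicyViolation(Exception):
    pass

class UrlPolicy:
    def __init__(self, allowed_prefixes):
        # e.g. ["https://cloud-images.ubuntu.com/"]
        self.allowed_prefixes = allowed_prefixes

    def assert_url_allowed(self, url: str) -> None:
        if not any(url.startswith(p) for p in self.allowed_prefixes):
            raise UrlPolicyViolation(f"URL not allowed by policy: {url}")

policy = UrlPolicy(["https://cloud-images.ubuntu.com/"])
try:
    policy.assert_url_allowed("https://example.com/image.qcow2")
except UrlPolicyViolation as e:
    print(f"blocked: {e}")  # cloner.py instead re-raises this as RuntimeError
```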
@@ -302,8 +310,20 @@ class SelectiveVMCloner:
  "images_dir_writable": False,
  "images_dir": str(images_dir),
  "session_type": "user" if self.user_session else "system",
+ "genisoimage_installed": False,
+ "virt_viewer_installed": False,
+ "qemu_img_installed": False,
  }

+ # Check for genisoimage
+ checks["genisoimage_installed"] = shutil.which("genisoimage") is not None
+
+ # Check for virt-viewer
+ checks["virt_viewer_installed"] = shutil.which("virt-viewer") is not None
+
+ # Check for qemu-img
+ checks["qemu_img_installed"] = shutil.which("qemu-img") is not None
+
  # Check libvirt connection
  if self.conn and self.conn.isAlive():
  checks["libvirt_connected"] = True
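The three new preflight entries rely on `shutil.which()`, which returns the resolved path of an executable on `PATH` or `None` if it is missing. A compact equivalent of the three checks (a sketch, not the packaged code) is a single loop:

```python
import shutil

# Equivalent loop over the three new preflight checks; clonebox stores the
# results in its `checks` dict under *_installed keys.
checks = {}
for tool in ("genisoimage", "virt-viewer", "qemu-img"):
    checks[f"{tool.replace('-', '_')}_installed"] = shutil.which(tool) is not None

missing = [k for k, ok in checks.items() if not ok]
print("missing host tools:", missing or "none")
```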
@@ -338,8 +358,9 @@ class SelectiveVMCloner:
  checks["images_dir_error"] = (
  f"Cannot write to {images_dir}\n"
  f" Option 1: Run with sudo\n"
- f" Option 2: Use --user flag for user session (no root needed)\n"
- f" Option 3: Fix permissions: sudo chown -R $USER:libvirt {images_dir}"
+ f" Option 2: Use --user flag for user session (recommended):\n"
+ f" clonebox clone . --user\n\n"
+ f" 3. Fix permissions: sudo chown -R $USER:libvirt {images_dir}"
  )
  else:
  # Try to create it
@@ -355,7 +376,13 @@ class SelectiveVMCloner:

  return checks

- def create_vm(self, config: VMConfig, console=None, replace: bool = False) -> str:
+ def create_vm(
+ self,
+ config: VMConfig,
+ console=None,
+ replace: bool = False,
+ approved: bool = False,
+ ) -> str:
  """
  Create a VM with only selected applications/paths.

@@ -407,7 +434,19 @@ class SelectiveVMCloner:
  )

  log.info(f"VM '{config.name}' already exists - replacing...")
- self.delete_vm(config.name, delete_storage=True, console=console, ignore_not_found=True)
+ policy = PolicyEngine.load_effective()
+ if policy is not None:
+ policy.assert_operation_approved(
+ AuditEventType.VM_DELETE.value,
+ approved=approved,
+ )
+ self.delete_vm(
+ config.name,
+ delete_storage=True,
+ console=console,
+ ignore_not_found=True,
+ approved=approved,
+ )

  # Determine images directory
  images_dir = self.get_images_dir()
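With the new `approved` parameter, replacing an existing VM is policy-gated: the implicit `delete_vm()` only proceeds once `assert_operation_approved()` has been satisfied for `AuditEventType.VM_DELETE`. A caller that wants the old replace-in-place behaviour now has to pass the flag through, roughly like the hypothetical call site below (the CLI wiring is not part of this diff).

```python
# Hypothetical call site: replacing an existing VM now needs approved=True
# so the policy engine lets the implicit delete_vm() go ahead.
# AuditEventType is imported elsewhere in the package and not shown here.
vm_uuid = cloner.create_vm(config, console=console, replace=True, approved=True)
```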
@@ -436,7 +475,7 @@ class SelectiveVMCloner:

  if config.base_image and Path(config.base_image).exists():
  # Use backing file for faster creation
- log.debug(f"Creating disk with backing file: {config.base_image}")
+ log.info(f"Creating root disk ({config.disk_size_gb}GB) using backing file: {config.base_image}")
  cmd = [
  "qemu-img",
  "create",
@@ -451,12 +490,12 @@ class SelectiveVMCloner:
  ]
  else:
  # Create empty disk
- log.debug(f"Creating empty {config.disk_size_gb}GB disk...")
+ log.info(f"Creating empty {config.disk_size_gb}GB root disk...")
  cmd = ["qemu-img", "create", "-f", "qcow2", str(root_disk), f"{config.disk_size_gb}G"]

  subprocess.run(cmd, check=True, capture_output=True)

- # Create cloud-init ISO if packages/services specified
+ # Create cloud-init ISO if packages/services/paths specified
  cloudinit_iso = None
  if (
  config.packages
@@ -464,9 +503,11 @@ class SelectiveVMCloner:
464
503
  or config.snap_packages
465
504
  or config.post_commands
466
505
  or config.gui
506
+ or config.paths
507
+ or config.copy_paths
467
508
  ):
468
509
  cloudinit_iso = ctx.add_file(self._create_cloudinit_iso(vm_dir, config, self.user_session))
469
- log.info(f"Created cloud-init ISO with {len(config.packages)} packages")
510
+ log.info(f"Created cloud-init ISO for VM setup")
470
511
 
471
512
  # Generate VM XML
472
513
  vm_xml = self._generate_vm_xml(config, root_disk, cloudinit_iso)
@@ -492,207 +533,11 @@ class SelectiveVMCloner:
492
533
 
493
534
  return vm.UUIDString()
494
535
 
495
- def _generate_vm_xml(
496
- self, config: VMConfig = None, root_disk: Path = None, cloudinit_iso: Optional[Path] = None
497
- ) -> str:
498
- """Generate libvirt XML for the VM."""
499
-
500
- # Backward compatibility: if called without args, try to derive defaults
501
- if config is None:
502
- config = VMConfig()
503
- if root_disk is None:
504
- root_disk = Path("/var/lib/libvirt/images/default-disk.qcow2")
505
-
506
- # Get resource limits from config or defaults
507
- resource_data = getattr(config, "resources", {})
508
- if not resource_data:
509
- # Fallback to top-level fields
510
- resource_data = {
511
- "cpu": {"vcpus": config.vcpus},
512
- "memory": {"limit": f"{config.ram_mb}M"},
513
- }
514
-
515
- limits = ResourceLimits.from_dict(resource_data)
516
-
517
- root = ET.Element("domain", type="kvm")
518
-
519
- # Basic metadata
520
- ET.SubElement(root, "name").text = config.name
521
- ET.SubElement(root, "uuid").text = str(uuid.uuid4())
522
-
523
- # Memory configuration using limits
524
- limit_kib = limits.memory.limit_bytes // 1024
525
- ET.SubElement(root, "memory", unit="KiB").text = str(limit_kib)
526
- ET.SubElement(root, "currentMemory", unit="KiB").text = str(limit_kib)
527
-
528
- # CPU configuration
529
- ET.SubElement(root, "vcpu", placement="static").text = str(limits.cpu.vcpus)
530
-
531
- # OS configuration
532
- os_elem = ET.SubElement(root, "os")
533
- ET.SubElement(os_elem, "type", arch="x86_64", machine="q35").text = "hvm"
534
- ET.SubElement(os_elem, "boot", dev="hd")
535
-
536
- # Features
537
- features = ET.SubElement(root, "features")
538
- ET.SubElement(features, "acpi")
539
- ET.SubElement(features, "apic")
540
-
541
- # Resource tuning (CPU and Memory)
542
- cputune_xml = limits.cpu.to_libvirt_xml()
543
- if cputune_xml:
544
- # We append pre-generated XML string later or use ET to parse it
545
- # For simplicity with existing ET code, we'll use SubElement for basic ones
546
- # and manual string insertion for complex tuning if needed,
547
- # but let's try to stick to ET where possible.
548
- pass
549
-
550
- # CPU tuning element
551
- # Only available in system session (requires cgroups)
552
- if not self.user_session and (limits.cpu.shares or limits.cpu.quota or limits.cpu.pin):
553
- cputune = ET.SubElement(root, "cputune")
554
- ET.SubElement(cputune, "shares").text = str(limits.cpu.shares)
555
- if limits.cpu.quota:
556
- ET.SubElement(cputune, "period").text = str(limits.cpu.period)
557
- ET.SubElement(cputune, "quota").text = str(limits.cpu.quota)
558
- if limits.cpu.pin:
559
- for idx, cpu in enumerate(limits.cpu.pin):
560
- ET.SubElement(cputune, "vcpupin", vcpu=str(idx), cpuset=str(cpu))
561
-
562
- # Memory tuning element
563
- # Only available in system session (requires cgroups)
564
- if not self.user_session and (limits.memory.soft_limit or limits.memory.swap):
565
- memtune = ET.SubElement(root, "memtune")
566
- ET.SubElement(memtune, "hard_limit", unit="KiB").text = str(limit_kib)
567
- if limits.memory.soft_limit_bytes:
568
- ET.SubElement(memtune, "soft_limit", unit="KiB").text = str(limits.memory.soft_limit_bytes // 1024)
569
- if limits.memory.swap_bytes:
570
- ET.SubElement(memtune, "swap_hard_limit", unit="KiB").text = str(limits.memory.swap_bytes // 1024)
571
-
572
- # CPU
573
- ET.SubElement(root, "cpu", mode="host-passthrough", check="none")
574
-
575
- # Devices
576
- devices = ET.SubElement(root, "devices")
577
-
578
- # Emulator
579
- ET.SubElement(devices, "emulator").text = "/usr/bin/qemu-system-x86_64"
580
-
581
- # Root disk
582
- disk = ET.SubElement(devices, "disk", type="file", device="disk")
583
- ET.SubElement(disk, "driver", name="qemu", type="qcow2", cache="writeback")
584
- ET.SubElement(disk, "source", file=str(root_disk))
585
- ET.SubElement(disk, "target", dev="vda", bus="virtio")
586
-
587
- # Disk I/O tuning
588
- # Only available in system session (requires cgroups)
589
- if not self.user_session and (limits.disk.read_bps or limits.disk.write_bps or limits.disk.read_iops or limits.disk.write_iops):
590
- iotune = ET.SubElement(disk, "iotune")
591
- if limits.disk.read_bps_bytes:
592
- ET.SubElement(iotune, "read_bytes_sec").text = str(limits.disk.read_bps_bytes)
593
- if limits.disk.write_bps_bytes:
594
- ET.SubElement(iotune, "write_bytes_sec").text = str(limits.disk.write_bps_bytes)
595
- if limits.disk.read_iops:
596
- ET.SubElement(iotune, "read_iops_sec").text = str(limits.disk.read_iops)
597
- if limits.disk.write_iops:
598
- ET.SubElement(iotune, "write_iops_sec").text = str(limits.disk.write_iops)
599
-
600
- # Cloud-init ISO
601
- if cloudinit_iso:
602
- cdrom = ET.SubElement(devices, "disk", type="file", device="cdrom")
603
- ET.SubElement(cdrom, "driver", name="qemu", type="raw")
604
- ET.SubElement(cdrom, "source", file=str(cloudinit_iso))
605
- ET.SubElement(cdrom, "target", dev="sda", bus="sata")
606
- ET.SubElement(cdrom, "readonly")
607
-
608
- # 9p filesystem mounts (bind mounts from host)
609
- # Use accessmode="mapped" to allow VM user to access host files regardless of UID
610
- for idx, (host_path, guest_tag) in enumerate(config.paths.items()):
611
- if Path(host_path).exists():
612
- fs = ET.SubElement(devices, "filesystem", type="mount", accessmode="mapped")
613
- ET.SubElement(fs, "driver", type="path", wrpolicy="immediate")
614
- ET.SubElement(fs, "source", dir=host_path)
615
- # Use simple tag names for 9p mounts
616
- tag = f"mount{idx}"
617
- ET.SubElement(fs, "target", dir=tag)
618
-
619
- # 9p filesystem mounts for COPY paths (mounted to temp location for import)
620
- for idx, (host_path, guest_path) in enumerate(config.copy_paths.items()):
621
- if Path(host_path).exists():
622
- fs = ET.SubElement(devices, "filesystem", type="mount", accessmode="mapped")
623
- ET.SubElement(fs, "driver", type="path", wrpolicy="immediate")
624
- ET.SubElement(fs, "source", dir=host_path)
625
- # Use import tag names for copy mounts
626
- tag = f"import{idx}"
627
- ET.SubElement(fs, "target", dir=tag)
628
-
629
- # Network interface
630
- network_mode = self.resolve_network_mode(config)
631
- if network_mode == "user":
632
- iface = ET.SubElement(devices, "interface", type="user")
633
- ET.SubElement(iface, "model", type="virtio")
634
- else:
635
- iface = ET.SubElement(devices, "interface", type="network")
636
- ET.SubElement(iface, "source", network="default")
637
- ET.SubElement(iface, "model", type="virtio")
638
-
639
- # Network bandwidth tuning
640
- if limits.network.inbound or limits.network.outbound:
641
- bandwidth = ET.SubElement(iface, "bandwidth")
642
- if limits.network.inbound_kbps:
643
- # average in KB/s
644
- ET.SubElement(bandwidth, "inbound", average=str(limits.network.inbound_kbps // 8))
645
- if limits.network.outbound_kbps:
646
- ET.SubElement(bandwidth, "outbound", average=str(limits.network.outbound_kbps // 8))
647
-
648
- # Serial console
649
- serial = ET.SubElement(devices, "serial", type="pty")
650
- ET.SubElement(serial, "target", port="0")
651
-
652
- console_elem = ET.SubElement(devices, "console", type="pty")
653
- ET.SubElement(console_elem, "target", type="serial", port="0")
654
-
655
- # Graphics (SPICE)
656
- if config.gui:
657
- graphics = ET.SubElement(
658
- devices, "graphics", type="spice", autoport="yes", listen="127.0.0.1"
659
- )
660
- ET.SubElement(graphics, "listen", type="address", address="127.0.0.1")
661
-
662
- # Video
663
- video = ET.SubElement(devices, "video")
664
- ET.SubElement(video, "model", type="virtio", heads="1", primary="yes")
665
-
666
- # Input devices
667
- ET.SubElement(devices, "input", type="tablet", bus="usb")
668
- ET.SubElement(devices, "input", type="keyboard", bus="usb")
669
-
670
- ET.SubElement(devices, "controller", type="virtio-serial", index="0")
671
-
672
- # Channel for guest agent
673
- channel = ET.SubElement(devices, "channel", type="unix")
674
- ET.SubElement(channel, "source", mode="bind")
675
- ET.SubElement(channel, "target", type="virtio", name="org.qemu.guest_agent.0")
676
-
677
- # Memory balloon
678
- memballoon = ET.SubElement(devices, "memballoon", model="virtio")
679
- ET.SubElement(
680
- memballoon,
681
- "address",
682
- type="pci",
683
- domain="0x0000",
684
- bus="0x00",
685
- slot="0x08",
686
- function="0x0",
687
- )
688
-
689
- return ET.tostring(root, encoding="unicode")
690
-
691
536
  def _generate_boot_diagnostic_script(self, config: VMConfig) -> str:
692
537
  """Generate boot diagnostic script with self-healing capabilities."""
693
538
  import base64
694
539
 
695
- wants_google_chrome = any(
540
+ wants_chrome = any(
696
541
  p == "/home/ubuntu/.config/google-chrome"
697
542
  for p in list((config.paths or {}).values()) + list((config.copy_paths or {}).values())
698
543
  )
@@ -773,7 +618,7 @@ check_snap() {{
  }}

  install_snap() {{
- timeout 60 snap wait system seed.loaded 2>/dev/null || true
+ timeout 120 snap wait system seed.loaded 2>/dev/null || true
  for i in $(seq 1 $MAX_RETRIES); do
  snap install "$1" --classic &>>"$LOG" && return 0
  snap install "$1" &>>"$LOG" && return 0
@@ -967,7 +812,7 @@ for pkg in "${{SNAP_PACKAGES[@]}}"; do
  [ -z "$pkg" ] && continue
  APPS_TO_TEST+=("$pkg")
  done
- if [ "{str(wants_google_chrome).lower()}" = "true" ]; then
+ if [ "{str(wants_chrome).lower()}" = "true" ]; then
  APPS_TO_TEST+=("google-chrome")
  fi
  if printf '%s\n' "${{APT_PACKAGES[@]}}" | grep -qx "docker.io"; then
@@ -1108,26 +953,28 @@ fi

  # Build package check commands
  apt_checks = []
- for pkg in config.packages:
- apt_checks.append(f'check_apt_package "{pkg}"')
+ for i, pkg in enumerate(config.packages, 1):
+ apt_checks.append(f'check_apt_package "{pkg}" "{i}/{len(config.packages)}"')

  snap_checks = []
- for pkg in config.snap_packages:
- snap_checks.append(f'check_snap_package "{pkg}"')
+ for i, pkg in enumerate(config.snap_packages, 1):
+ snap_checks.append(f'check_snap_package "{pkg}" "{i}/{len(config.snap_packages)}"')

  service_checks = []
- for svc in config.services:
- service_checks.append(f'check_service "{svc}"')
+ for i, svc in enumerate(config.services, 1):
+ service_checks.append(f'check_service "{svc}" "{i}/{len(config.services)}"')

  mount_checks = []
- for idx, (host_path, guest_path) in enumerate(config.paths.items()):
- mount_checks.append(f'check_mount "{guest_path}" "mount{idx}"')
+ bind_paths = list(config.paths.items())
+ for i, (host_path, guest_tag) in enumerate(bind_paths, 1):
+ mount_checks.append(f'check_mount "{guest_tag}" "mount{i-1}" "{i}/{len(bind_paths)}"')

  # Add copied paths checks
- copy_paths = config.copy_paths or config.app_data_paths
+ copy_paths = config.copy_paths or getattr(config, "app_data_paths", {})
  if copy_paths:
- for idx, (host_path, guest_path) in enumerate(copy_paths.items()):
- mount_checks.append(f'check_copy_path "{guest_path}"')
+ copy_list = list(copy_paths.items())
+ for i, (host_path, guest_path) in enumerate(copy_list, 1):
+ mount_checks.append(f'check_copy_path "{guest_path}" "{i}/{len(copy_list)}"')

  apt_checks_str = "\n".join(apt_checks) if apt_checks else "echo 'No apt packages to check'"
  snap_checks_str = (
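Each generated check now carries an `i/len(...)` progress fragment so the guest-side script can print counters. The pattern is plain `enumerate(..., 1)` string building, for example:

```python
packages = ["git", "curl", "docker.io"]
apt_checks = [
    f'check_apt_package "{pkg}" "{i}/{len(packages)}"'
    for i, pkg in enumerate(packages, 1)
]
# -> ['check_apt_package "git" "1/3"', 'check_apt_package "curl" "2/3"', ...]
```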
@@ -1138,7 +985,7 @@ fi
  )
  mount_checks_str = "\n".join(mount_checks) if mount_checks else "echo 'No mounts to check'"

- script = f"""#!/bin/bash
+ script = fr"""#!/bin/bash
  # CloneBox Health Check Script
  # Generated automatically - validates all installed components

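Switching the template from `f"""` to `fr"""` makes it a raw f-string: backslashes such as the `\[` in the regex test added to `log()` below reach the generated bash unchanged instead of being treated as Python escape sequences. A minimal illustration:

```python
# In a raw f-string the backslashes survive verbatim, so an embedded bash
# regex like the new PASS/FAIL matcher is emitted exactly as written.
check = fr'if [[ "$1" =~ ^\[(PASS|FAIL|WARN|INFO)\] ]]; then echo matched; fi'
print(check)
```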
@@ -1159,21 +1006,47 @@ NC='\\033[0m'

  log() {{
  echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$REPORT_FILE"
+ # If it's a PASS/FAIL/INFO/WARN line, also echo with prefix for the monitor
+ if [[ "$1" =~ ^\[(PASS|FAIL|WARN|INFO)\] ]]; then
+ echo " → $1"
+ fi
+ }}
+
+ check_disk_space() {{
+ local usage
+ usage=$(df / --output=pcent | tail -n 1 | tr -dc '0-9')
+ local avail
+ avail=$(df -h / --output=avail | tail -n 1 | tr -d ' ')
+
+ if [ "$usage" -gt 95 ]; then
+ log "[FAIL] Disk space nearly full: ${{usage}}% used ($avail available)"
+ ((FAILED++))
+ return 1
+ elif [ "$usage" -gt 85 ]; then
+ log "[WARN] Disk usage high: ${{usage}}% used ($avail available)"
+ ((WARNINGS++))
+ return 0
+ else
+ log "[PASS] Disk space OK: ${{usage}}% used ($avail available)"
+ ((PASSED++))
+ return 0
+ fi
  }}

  check_apt_package() {{
  local pkg="$1"
+ local progress="$2"
  if dpkg -l "$pkg" 2>/dev/null | grep -q "^ii"; then
- log "[PASS] APT package '$pkg' is installed"
+ log "[PASS] [$progress] APT package '$pkg' is installed"
  ((PASSED++))
  return 0
  else
  if [ $SETUP_IN_PROGRESS -eq 1 ]; then
- log "[WARN] APT package '$pkg' is not installed yet"
+ log "[WARN] [$progress] APT package '$pkg' is not installed yet"
  ((WARNINGS++))
  return 1
  else
- log "[FAIL] APT package '$pkg' is NOT installed"
+ log "[FAIL] [$progress] APT package '$pkg' is NOT installed"
  ((FAILED++))
  return 1
  fi
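The new `check_disk_space` parses `df --output=pcent` inside the guest and uses 85% and 95% thresholds for WARN and FAIL. A host-side Python equivalent of the same thresholds (a sketch using `shutil.disk_usage`, not code from this package) would be:

```python
import shutil

# Same 85% / 95% thresholds as the generated guest-side check, but using
# shutil.disk_usage on the host instead of parsing `df` output.
usage = shutil.disk_usage("/")
pct = usage.used / usage.total * 100
if pct > 95:
    status = "FAIL"
elif pct > 85:
    status = "WARN"
else:
    status = "PASS"
print(f"[{status}] Disk space: {pct:.0f}% used, {usage.free // 2**30} GiB free")
```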
@@ -1182,19 +1055,20 @@ check_apt_package() {{
1182
1055
 
1183
1056
  check_snap_package() {{
1184
1057
  local pkg="$1"
1058
+ local progress="$2"
1185
1059
  local out
1186
1060
  out=$(snap list "$pkg" 2>&1)
1187
1061
  if [ $? -eq 0 ]; then
1188
- log "[PASS] Snap package '$pkg' is installed"
1062
+ log "[PASS] [$progress] Snap package '$pkg' is installed"
1189
1063
  ((PASSED++))
1190
1064
  return 0
1191
1065
  else
1192
1066
  if [ $SETUP_IN_PROGRESS -eq 1 ]; then
1193
- log "[WARN] Snap package '$pkg' is not installed yet"
1067
+ log "[WARN] [$progress] Snap package '$pkg' is not installed yet"
1194
1068
  ((WARNINGS++))
1195
1069
  return 1
1196
1070
  else
1197
- log "[FAIL] Snap package '$pkg' is NOT installed"
1071
+ log "[FAIL] [$progress] Snap package '$pkg' is NOT installed"
1198
1072
  ((FAILED++))
1199
1073
  return 1
1200
1074
  fi
@@ -1203,18 +1077,19 @@ check_snap_package() {{
1203
1077
 
1204
1078
  check_service() {{
1205
1079
  local svc="$1"
1080
+ local progress="$2"
1206
1081
  if systemctl is-enabled "$svc" &>/dev/null; then
1207
1082
  if systemctl is-active "$svc" &>/dev/null; then
1208
- log "[PASS] Service '$svc' is enabled and running"
1083
+ log "[PASS] [$progress] Service '$svc' is enabled and running"
1209
1084
  ((PASSED++))
1210
1085
  return 0
1211
1086
  else
1212
- log "[WARN] Service '$svc' is enabled but not running"
1087
+ log "[WARN] [$progress] Service '$svc' is enabled but not running"
1213
1088
  ((WARNINGS++))
1214
1089
  return 1
1215
1090
  fi
1216
1091
  else
1217
- log "[INFO] Service '$svc' is not enabled (may be optional)"
1092
+ log "[INFO] [$progress] Service '$svc' is not enabled (may be optional)"
1218
1093
  return 0
1219
1094
  fi
1220
1095
  }}
@@ -1222,38 +1097,40 @@ check_service() {{
1222
1097
  check_mount() {{
1223
1098
  local path="$1"
1224
1099
  local tag="$2"
1100
+ local progress="$3"
1225
1101
  if mountpoint -q "$path" 2>/dev/null; then
1226
- log "[PASS] Mount '$path' ($tag) is active"
1102
+ log "[PASS] [$progress] Mount '$path' ($tag) is active"
1227
1103
  ((PASSED++))
1228
1104
  return 0
1229
1105
  elif [ -d "$path" ]; then
1230
- log "[WARN] Directory '$path' exists but not mounted"
1106
+ log "[WARN] [$progress] Directory '$path' exists but not mounted"
1231
1107
  ((WARNINGS++))
1232
1108
  return 1
1233
1109
  else
1234
- log "[INFO] Mount point '$path' does not exist yet"
1110
+ log "[INFO] [$progress] Mount point '$path' does not exist yet"
1235
1111
  return 0
1236
1112
  fi
1237
1113
  }}
1238
1114
 
1239
1115
  check_copy_path() {{
1240
1116
  local path="$1"
1117
+ local progress="$2"
1241
1118
  if [ -d "$path" ]; then
1242
1119
  if [ "$(ls -A "$path" 2>/dev/null | wc -l)" -gt 0 ]; then
1243
- log "[PASS] Path '$path' exists and contains data"
1120
+ log "[PASS] [$progress] Path '$path' exists and contains data"
1244
1121
  ((PASSED++))
1245
1122
  return 0
1246
1123
  else
1247
- log "[WARN] Path '$path' exists but is EMPTY"
1124
+ log "[WARN] [$progress] Path '$path' exists but is EMPTY"
1248
1125
  ((WARNINGS++))
1249
1126
  return 1
1250
1127
  fi
1251
1128
  else
1252
1129
  if [ $SETUP_IN_PROGRESS -eq 1 ]; then
1253
- log "[INFO] Path '$path' not imported yet"
1130
+ log "[INFO] [$progress] Path '$path' not imported yet"
1254
1131
  return 0
1255
1132
  else
1256
- log "[FAIL] Path '$path' MISSING"
1133
+ log "[FAIL] [$progress] Path '$path' MISSING"
1257
1134
  ((FAILED++))
1258
1135
  return 1
1259
1136
  fi
@@ -1283,6 +1160,11 @@ log "VM Name: {config.name}"
  log "Date: $(date)"
  log "=========================================="

+ log ""
+ log "--- System Health ---"
+ check_disk_space
+ check_gui
+
  log ""
  log "--- APT Packages ---"
  {apt_checks_str}
@@ -1337,10 +1219,213 @@ fi
1337
1219
  encoded = base64.b64encode(script.encode()).decode()
1338
1220
  return encoded
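The diagnostic and health scripts are shipped to the guest as base64 so that multi-line bash (quotes, `$`, redirections) survives the trip through cloud-init YAML; the guest decodes them with `base64 -d`, as the runcmd lines further down show. The round trip is just:

```python
import base64

script = "#!/bin/bash\necho 'hello from the guest'\n"
encoded = base64.b64encode(script.encode()).decode()

# Matching cloud-init runcmd entry (same shape as the clonebox-health install
# line; /usr/local/bin/clonebox-demo is an illustrative target path only).
runcmd = f" - echo '{encoded}' | base64 -d > /usr/local/bin/clonebox-demo"
```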
1339
1221
 
1222
+ def _generate_vm_xml(
1223
+ self, config: VMConfig = None, root_disk: Path = None, cloudinit_iso: Optional[Path] = None
1224
+ ) -> str:
1225
+ """Generate libvirt XML for the VM."""
1226
+
1227
+ # Backward compatibility: if called without args, try to derive defaults
1228
+ if config is None:
1229
+ config = VMConfig()
1230
+ if root_disk is None:
1231
+ root_disk = Path("/var/lib/libvirt/images/default-disk.qcow2")
1232
+
1233
+ # Get resource limits from config or defaults
1234
+ resource_data = getattr(config, "resources", {})
1235
+ if not resource_data:
1236
+ # Fallback to top-level fields
1237
+ resource_data = {
1238
+ "cpu": {"vcpus": config.vcpus},
1239
+ "memory": {"limit": f"{config.ram_mb}M"},
1240
+ }
1241
+
1242
+ limits = ResourceLimits.from_dict(resource_data)
1243
+
1244
+ root = ET.Element("domain", type="kvm")
1245
+
1246
+ # Basic metadata
1247
+ ET.SubElement(root, "name").text = config.name
1248
+ ET.SubElement(root, "uuid").text = str(uuid.uuid4())
1249
+
1250
+ # Memory configuration using limits
1251
+ limit_kib = limits.memory.limit_bytes // 1024
1252
+ ET.SubElement(root, "memory", unit="KiB").text = str(limit_kib)
1253
+ ET.SubElement(root, "currentMemory", unit="KiB").text = str(limit_kib)
1254
+
1255
+ # CPU configuration
1256
+ ET.SubElement(root, "vcpu", placement="static").text = str(limits.cpu.vcpus)
1257
+
1258
+ # OS configuration
1259
+ os_elem = ET.SubElement(root, "os")
1260
+ ET.SubElement(os_elem, "type", arch="x86_64", machine="q35").text = "hvm"
1261
+ ET.SubElement(os_elem, "boot", dev="hd")
1262
+
1263
+ # Features
1264
+ features = ET.SubElement(root, "features")
1265
+ ET.SubElement(features, "acpi")
1266
+ ET.SubElement(features, "apic")
1267
+
1268
+ # Resource tuning (CPU and Memory)
1269
+ cputune_xml = limits.cpu.to_libvirt_xml()
1270
+ if cputune_xml:
1271
+ # We append pre-generated XML string later or use ET to parse it
1272
+ # For simplicity with existing ET code, we'll use SubElement for basic ones
1273
+ # and manual string insertion for complex tuning if needed,
1274
+ # but let's try to stick to ET where possible.
1275
+ pass
1276
+
1277
+ # CPU tuning element
1278
+ # Only available in system session (requires cgroups)
1279
+ if not self.user_session and (limits.cpu.shares or limits.cpu.quota or limits.cpu.pin):
1280
+ cputune = ET.SubElement(root, "cputune")
1281
+ ET.SubElement(cputune, "shares").text = str(limits.cpu.shares)
1282
+ if limits.cpu.quota:
1283
+ ET.SubElement(cputune, "period").text = str(limits.cpu.period)
1284
+ ET.SubElement(cputune, "quota").text = str(limits.cpu.quota)
1285
+ if limits.cpu.pin:
1286
+ for idx, cpu in enumerate(limits.cpu.pin):
1287
+ ET.SubElement(cputune, "vcpupin", vcpu=str(idx), cpuset=str(cpu))
1288
+
1289
+ # Memory tuning element
1290
+ # Only available in system session (requires cgroups)
1291
+ if not self.user_session and (limits.memory.soft_limit or limits.memory.swap):
1292
+ memtune = ET.SubElement(root, "memtune")
1293
+ ET.SubElement(memtune, "hard_limit", unit="KiB").text = str(limit_kib)
1294
+ if limits.memory.soft_limit_bytes:
1295
+ ET.SubElement(memtune, "soft_limit", unit="KiB").text = str(limits.memory.soft_limit_bytes // 1024)
1296
+ if limits.memory.swap_bytes:
1297
+ ET.SubElement(memtune, "swap_hard_limit", unit="KiB").text = str(limits.memory.swap_bytes // 1024)
1298
+
1299
+ # CPU
1300
+ ET.SubElement(root, "cpu", mode="host-passthrough", check="none")
1301
+
1302
+ # Devices
1303
+ devices = ET.SubElement(root, "devices")
1304
+
1305
+ # Emulator
1306
+ ET.SubElement(devices, "emulator").text = "/usr/bin/qemu-system-x86_64"
1307
+
1308
+ # Root disk
1309
+ disk = ET.SubElement(devices, "disk", type="file", device="disk")
1310
+ ET.SubElement(disk, "driver", name="qemu", type="qcow2", cache="writeback")
1311
+ ET.SubElement(disk, "source", file=str(root_disk))
1312
+ ET.SubElement(disk, "target", dev="vda", bus="virtio")
1313
+
1314
+ # Disk I/O tuning
1315
+ # Only available in system session (requires cgroups)
1316
+ if not self.user_session and (limits.disk.read_bps or limits.disk.write_bps or limits.disk.read_iops or limits.disk.write_iops):
1317
+ iotune = ET.SubElement(disk, "iotune")
1318
+ if limits.disk.read_bps_bytes:
1319
+ ET.SubElement(iotune, "read_bytes_sec").text = str(limits.disk.read_bps_bytes)
1320
+ if limits.disk.write_bps_bytes:
1321
+ ET.SubElement(iotune, "write_bytes_sec").text = str(limits.disk.write_bps_bytes)
1322
+ if limits.disk.read_iops:
1323
+ ET.SubElement(iotune, "read_iops_sec").text = str(limits.disk.read_iops)
1324
+ if limits.disk.write_iops:
1325
+ ET.SubElement(iotune, "write_iops_sec").text = str(limits.disk.write_iops)
1326
+
1327
+ # Cloud-init ISO
1328
+ if cloudinit_iso:
1329
+ cdrom = ET.SubElement(devices, "disk", type="file", device="cdrom")
1330
+ ET.SubElement(cdrom, "driver", name="qemu", type="raw")
1331
+ ET.SubElement(cdrom, "source", file=str(cloudinit_iso))
1332
+ ET.SubElement(cdrom, "target", dev="sda", bus="sata")
1333
+ ET.SubElement(cdrom, "readonly")
1334
+
1335
+ # 9p filesystem mounts (bind mounts from host)
1336
+ # Use accessmode="mapped" to allow VM user to access host files regardless of UID
1337
+ for idx, (host_path, guest_tag) in enumerate(config.paths.items()):
1338
+ if Path(host_path).exists():
1339
+ fs = ET.SubElement(devices, "filesystem", type="mount", accessmode="mapped")
1340
+ ET.SubElement(fs, "driver", type="path", wrpolicy="immediate")
1341
+ ET.SubElement(fs, "source", dir=host_path)
1342
+ # Use simple tag names for 9p mounts
1343
+ tag = f"mount{idx}"
1344
+ ET.SubElement(fs, "target", dir=tag)
1345
+
1346
+ # 9p filesystem mounts for COPY paths (mounted to temp location for import)
1347
+ for idx, (host_path, guest_path) in enumerate(config.copy_paths.items()):
1348
+ if Path(host_path).exists():
1349
+ fs = ET.SubElement(devices, "filesystem", type="mount", accessmode="mapped")
1350
+ ET.SubElement(fs, "driver", type="path", wrpolicy="immediate")
1351
+ ET.SubElement(fs, "source", dir=host_path)
1352
+ # Use import tag names for copy mounts
1353
+ tag = f"import{idx}"
1354
+ ET.SubElement(fs, "target", dir=tag)
1355
+
1356
+ # Network interface
1357
+ network_mode = self.resolve_network_mode(config)
1358
+ if network_mode == "user":
1359
+ iface = ET.SubElement(devices, "interface", type="user")
1360
+ ET.SubElement(iface, "model", type="virtio")
1361
+ else:
1362
+ iface = ET.SubElement(devices, "interface", type="network")
1363
+ ET.SubElement(iface, "source", network="default")
1364
+ ET.SubElement(iface, "model", type="virtio")
1365
+
1366
+ # Network bandwidth tuning
1367
+ if limits.network.inbound or limits.network.outbound:
1368
+ bandwidth = ET.SubElement(iface, "bandwidth")
1369
+ if limits.network.inbound_kbps:
1370
+ # average in KB/s
1371
+ ET.SubElement(bandwidth, "inbound", average=str(limits.network.inbound_kbps // 8))
1372
+ if limits.network.outbound_kbps:
1373
+ ET.SubElement(bandwidth, "outbound", average=str(limits.network.outbound_kbps // 8))
1374
+
1375
+ # Serial console
1376
+ serial = ET.SubElement(devices, "serial", type="pty")
1377
+ ET.SubElement(serial, "target", port="0")
1378
+
1379
+ console_elem = ET.SubElement(devices, "console", type="pty")
1380
+ ET.SubElement(console_elem, "target", type="serial", port="0")
1381
+
1382
+ # Graphics (SPICE)
1383
+ if config.gui:
1384
+ graphics = ET.SubElement(
1385
+ devices, "graphics", type="spice", autoport="yes", listen="127.0.0.1"
1386
+ )
1387
+ ET.SubElement(graphics, "listen", type="address", address="127.0.0.1")
1388
+
1389
+ # Video
1390
+ video = ET.SubElement(devices, "video")
1391
+ ET.SubElement(video, "model", type="virtio", heads="1", primary="yes")
1392
+
1393
+ # Input devices
1394
+ ET.SubElement(devices, "input", type="tablet", bus="usb")
1395
+ ET.SubElement(devices, "input", type="keyboard", bus="usb")
1396
+
1397
+ ET.SubElement(devices, "controller", type="virtio-serial", index="0")
1398
+
1399
+ # Channel for guest agent
1400
+ channel = ET.SubElement(devices, "channel", type="unix")
1401
+ ET.SubElement(channel, "source", mode="bind")
1402
+ ET.SubElement(channel, "target", type="virtio", name="org.qemu.guest_agent.0")
1403
+
1404
+ # Memory balloon
1405
+ memballoon = ET.SubElement(devices, "memballoon", model="virtio")
1406
+ ET.SubElement(
1407
+ memballoon,
1408
+ "address",
1409
+ type="pci",
1410
+ domain="0x0000",
1411
+ bus="0x00",
1412
+ slot="0x08",
1413
+ function="0x0",
1414
+ )
1415
+
1416
+ return ET.tostring(root, encoding="unicode")
1417
+
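`_generate_vm_xml` is moved below the script generators in this release but otherwise unchanged; it builds the whole libvirt domain with `xml.etree.ElementTree` instead of string templates. Stripped to a skeleton, the approach looks like the minimal sketch below (only a handful of the elements shown above, with illustrative values; the resulting string is typically handed to libvirt, e.g. `conn.defineXML(xml_str)`).

```python
import uuid
import xml.etree.ElementTree as ET

root = ET.Element("domain", type="kvm")
ET.SubElement(root, "name").text = "demo-vm"
ET.SubElement(root, "uuid").text = str(uuid.uuid4())
ET.SubElement(root, "memory", unit="KiB").text = str(2 * 1024 * 1024)  # 2 GiB
ET.SubElement(root, "vcpu", placement="static").text = "2"

devices = ET.SubElement(root, "devices")
disk = ET.SubElement(devices, "disk", type="file", device="disk")
ET.SubElement(disk, "driver", name="qemu", type="qcow2")
ET.SubElement(disk, "source", file="/var/lib/libvirt/images/demo.qcow2")
ET.SubElement(disk, "target", dev="vda", bus="virtio")

xml_str = ET.tostring(root, encoding="unicode")
print(xml_str)
```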
1340
1418
  def _create_cloudinit_iso(self, vm_dir: Path, config: VMConfig, user_session: bool = False) -> Path:
1341
1419
  """Create cloud-init ISO with secure credential handling."""
1342
1420
  secrets_mgr = SecretsManager()
1343
1421
 
1422
+ # Determine if Chrome is wanted early to avoid NameError in helpers
1423
+ wants_chrome = any(
1424
+ p == "/home/ubuntu/.config/google-chrome"
1425
+ for p in list((config.paths or {}).values())
1426
+ + list((config.copy_paths or {}).values())
1427
+ )
1428
+
1344
1429
  # Determine authentication method
1345
1430
  auth_method = getattr(config, "auth_method", "ssh_key")
1346
1431
 
@@ -1406,10 +1491,10 @@ fi
  bind_mount_commands = []
  fstab_entries = []
  all_paths = dict(config.paths) if config.paths else {}
+ existing_bind_paths = {h: g for h, g in all_paths.items() if Path(h).exists()}
  pre_chown_dirs: set[str] = set()
- for idx, (host_path, guest_path) in enumerate(all_paths.items()):
- if Path(host_path).exists():
- # Ensure all parent directories in /home/ubuntu are owned by user
+ for idx, (host_path, guest_path) in enumerate(existing_bind_paths.items()):
+ # Ensure all parent directories in /home/ubuntu are owned by user
  # This prevents "Permission denied" when creating config dirs (e.g. .config) as root
  if str(guest_path).startswith("/home/ubuntu/"):
  try:
@@ -1455,7 +1540,7 @@ fi
  import_mount_commands.append(f" - mkdir -p {temp_mount_point}")

  # 2. Mount the 9p share
- import_mount_commands.append(f" - mount -t 9p -o {mount_opts} {tag} {temp_mount_point} || true")
+ import_mount_commands.append(f" - mount -t 9p -o {mount_opts} {tag} {temp_mount_point} || echo ' → ❌ Failed to mount temporary share {tag}'")

  # 3. Ensure target directory exists and permissions are prepared
  if str(guest_path).startswith("/home/ubuntu/"):
@@ -1466,8 +1551,8 @@ fi

  # 4. Copy contents (cp -rT to copy contents of source to target)
  # We use || true to ensure boot continues even if copy fails
- import_mount_commands.append(f" - echo 'Importing {host_path} to {guest_path}...'")
- import_mount_commands.append(f" - cp -rT {temp_mount_point} {guest_path} || true")
+ import_mount_commands.append(f" - echo 'Importing {host_path} to {guest_path}...'")
+ import_mount_commands.append(f" - cp -rT {temp_mount_point} {guest_path} || echo ' → ❌ Failed to copy data to {guest_path}'")

  # 5. Fix ownership recursively
  import_mount_commands.append(f" - chown -R 1000:1000 {guest_path}")
@@ -1493,54 +1578,80 @@ fi
1493
1578
  # Build runcmd - services, mounts, snaps, post_commands
1494
1579
  runcmd_lines = []
1495
1580
 
1581
+ # wants_chrome moved to top of method to avoid NameError
1582
+
1496
1583
  # Add detailed logging header
1497
1584
  runcmd_lines.append(" - echo '═══════════════════════════════════════════════════════════'")
1498
1585
  runcmd_lines.append(" - echo ' CloneBox VM Installation Progress'")
1499
1586
  runcmd_lines.append(" - echo '═══════════════════════════════════════════════════════════'")
1500
1587
  runcmd_lines.append(" - echo ''")
1588
+
1589
+ # Phase 1: System Optimization (Pre-install)
1590
+ runcmd_lines.append(" - echo '[1/10] 🛠️ Optimizing system resources...'")
1591
+ runcmd_lines.append(" - echo ' → Limiting journal size to 50M'")
1592
+ runcmd_lines.append(" - sed -i 's/^#SystemMaxUse=/SystemMaxUse=50M/' /etc/systemd/journald.conf || true")
1593
+ runcmd_lines.append(" - systemctl restart systemd-journald || true")
1594
+ runcmd_lines.append(" - echo ' → ✓ [1/10] System resources optimized'")
1595
+ runcmd_lines.append(" - echo ''")
1501
1596
 
1502
- # Phase 1: APT Packages
1597
+ # Phase 2: APT Packages
1503
1598
  if all_packages:
1504
- runcmd_lines.append(f" - echo '[1/9] 📦 Installing APT packages ({len(all_packages)} total)...'")
1599
+ runcmd_lines.append(f" - echo '[2/10] 📦 Installing APT packages ({len(all_packages)} total)...'")
1505
1600
  runcmd_lines.append(" - export DEBIAN_FRONTEND=noninteractive")
1601
+ # Check space before starting
1602
+ runcmd_lines.append(" - if [ $(df / --output=avail | tail -n 1) -lt 524288 ]; then echo ' → ⚠️ WARNING: Low disk space (<512MB) before APT install'; fi")
1603
+ runcmd_lines.append(" - echo ' → Updating package repositories...'")
1506
1604
  runcmd_lines.append(" - apt-get update")
1507
1605
  for i, pkg in enumerate(all_packages, 1):
1508
1606
  runcmd_lines.append(f" - echo ' → [{i}/{len(all_packages)}] Installing {pkg}...'")
1509
- runcmd_lines.append(f" - apt-get install -y {pkg} || echo ' ⚠️ Failed to install {pkg}'")
1510
- runcmd_lines.append(" - echo ' ✓ APT packages installed'")
1607
+ runcmd_lines.append(f" - apt-get install -y {pkg} || echo ' → ❌ Failed to install {pkg}'")
1608
+ if pkg == "qemu-guest-agent":
1609
+ runcmd_lines.append(
1610
+ " - systemctl enable --now qemu-guest-agent || echo ' → ❌ Failed to enable qemu-guest-agent'"
1611
+ )
1612
+ runcmd_lines.append(" - apt-get clean")
1613
+ runcmd_lines.append(" - echo ' → ✓ [2/10] APT packages installed'")
1614
+ runcmd_lines.append(" - df -h / | sed 's/^/ → /'")
1511
1615
  runcmd_lines.append(" - echo ''")
1512
1616
  else:
1513
- runcmd_lines.append(" - echo '[1/9] 📦 No APT packages to install'")
1617
+ runcmd_lines.append(" - echo '[2/10] 📦 No APT packages to install'")
1514
1618
  runcmd_lines.append(" - echo ''")
1515
1619
 
1516
- # Phase 2: Core services
1517
- runcmd_lines.append(" - echo '[2/9] 🔧 Enabling core services...'")
1518
- runcmd_lines.append(" - echo ' → qemu-guest-agent'")
1519
- runcmd_lines.append(" - systemctl enable --now qemu-guest-agent || true")
1520
- runcmd_lines.append(" - echo ' → snapd'")
1521
- runcmd_lines.append(" - systemctl enable --now snapd || true")
1620
+ # Phase 3: Core services
1621
+ runcmd_lines.append(" - echo '[3/10] 🔧 Enabling core services...'")
1622
+ runcmd_lines.append(" - echo ' → [1/2] Enabling qemu-guest-agent'")
1623
+ runcmd_lines.append(" - systemctl enable --now qemu-guest-agent || echo ' → ❌ Failed to enable qemu-guest-agent'")
1624
+ runcmd_lines.append(" - echo ' → [2/2] Enabling snapd'")
1625
+ runcmd_lines.append(" - systemctl enable --now snapd || echo ' → ❌ Failed to enable snapd'")
1522
1626
  runcmd_lines.append(" - echo ' → Waiting for snap system seed...'")
1523
1627
  runcmd_lines.append(" - timeout 300 snap wait system seed.loaded || true")
1524
- runcmd_lines.append(" - echo ' ✓ Core services enabled'")
1628
+ runcmd_lines.append(" - echo ' [3/10] Core services enabled'")
1525
1629
  runcmd_lines.append(" - echo ''")
1526
1630
 
1527
- # Phase 3: User services
1528
- runcmd_lines.append(f" - echo '[3/9] 🔧 Enabling user services ({len(config.services)} total)...'")
1631
+ # Phase 4: User services
1632
+ runcmd_lines.append(f" - echo '[4/10] 🔧 Enabling user services ({len(config.services)} total)...'")
1529
1633
  for i, svc in enumerate(config.services, 1):
1530
1634
  runcmd_lines.append(f" - echo ' → [{i}/{len(config.services)}] {svc}'")
1531
- runcmd_lines.append(f" - systemctl enable --now {svc} || true")
1532
- runcmd_lines.append(" - echo ' ✓ User services enabled'")
1635
+ runcmd_lines.append(f" - systemctl enable --now {svc} || echo ' → ❌ Failed to enable {svc}'")
1636
+ runcmd_lines.append(" - echo ' [4/10] User services enabled'")
1533
1637
  runcmd_lines.append(" - echo ''")
1534
1638
 
1535
- # Phase 4: Filesystem mounts
1536
- runcmd_lines.append(f" - echo '[4/9] 📁 Mounting shared directories ({len(config.paths)} mounts)...'")
1639
+ # Phase 5: Filesystem mounts
1640
+ runcmd_lines.append(f" - echo '[5/10] 📁 Mounting shared directories ({len(existing_bind_paths)} mounts)...'")
1537
1641
  if bind_mount_commands:
1642
+ mount_idx = 0
1538
1643
  for cmd in bind_mount_commands:
1539
1644
  if "mount -t 9p" in cmd:
1645
+ mount_idx += 1
1540
1646
  # Extract mount point for logging
1541
- parts = cmd.split()
1542
- mp = parts[-2] if len(parts) > 2 else "path"
1543
- runcmd_lines.append(f" - echo ' → Mounting {mp}...'")
1647
+ parts = cmd.strip().split()
1648
+ # Look for the path before '||'
1649
+ try:
1650
+ sep_idx = parts.index("||")
1651
+ mp = parts[sep_idx - 1]
1652
+ except ValueError:
1653
+ mp = parts[-1]
1654
+ runcmd_lines.append(f" - echo ' → [{mount_idx}/{len(existing_bind_paths)}] Mounting {mp}...'")
1544
1655
  runcmd_lines.append(cmd)
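The mount-progress logging above now parses the generated command to find the real mount point: it takes the token immediately before the `||` fallback rather than assuming a fixed position near the end of the string. Isolated, the extraction is:

```python
def extract_mount_point(cmd: str) -> str:
    # Token just before '||' is the mount point; fall back to the last token
    # when there is no error-handling suffix.
    parts = cmd.strip().split()
    try:
        return parts[parts.index("||") - 1]
    except ValueError:
        return parts[-1]

cmd = " - mount -t 9p -o trans=virtio mount0 /home/ubuntu/project || echo 'failed'"
print(extract_mount_point(cmd))  # /home/ubuntu/project
```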
1545
1656
 
1546
1657
  if fstab_entries:
@@ -1551,163 +1662,110 @@ fi
1551
1662
  runcmd_lines.append(
1552
1663
  f" - grep -qF \"{entry}\" /etc/fstab || echo '{entry}' >> /etc/fstab"
1553
1664
  )
1554
- runcmd_lines.append(" - mount -a || true")
1555
- runcmd_lines.append(" - echo ' ✓ Mounts configured'")
1665
+ runcmd_lines.append(" - mount -a || echo ' → ❌ Failed to mount shared directories'")
1666
+ runcmd_lines.append(" - echo ' [5/10] Mounts configured'")
1556
1667
  runcmd_lines.append(" - echo ''")
1557
1668
 
1558
- # Phase 5: Data Import (copied paths)
1669
+ # Phase 6: Data Import (copied paths)
1559
1670
  if existing_copy_paths:
1560
- runcmd_lines.append(f" - echo '[5/9] 📥 Importing data ({len(existing_copy_paths)} paths)...'")
1671
+ runcmd_lines.append(f" - echo '[6/10] 📥 Importing data ({len(existing_copy_paths)} paths)...'")
1672
+ # Check space before starting large import
1673
+ runcmd_lines.append(" - if [ $(df / --output=avail | tail -n 1) -lt 1048576 ]; then echo ' → ⚠️ WARNING: Low disk space (<1GB) before data import'; fi")
1561
1674
  # Add import commands with progress
1562
1675
  import_count = 0
1563
1676
  for cmd in import_mount_commands:
1564
1677
  if "Importing" in cmd:
1565
1678
  import_count += 1
1566
- runcmd_lines.append(cmd.replace("Importing", f" → [{import_count}/{len(existing_copy_paths)}] Importing"))
1679
+ # Replace the placeholder 'Importing' with numbered progress, ensuring no double prefix
1680
+ msg = cmd.replace(" - echo ' → Importing", f" - echo ' → [{import_count}/{len(existing_copy_paths)}] Importing")
1681
+ runcmd_lines.append(msg)
1567
1682
  else:
1568
1683
  runcmd_lines.append(cmd)
1569
- runcmd_lines.append(" - echo ' ✓ Data import completed'")
1684
+ runcmd_lines.append(" - echo ' [6/10] Data import completed'")
1685
+ runcmd_lines.append(" - df -h / | sed 's/^/ → /'")
1570
1686
  runcmd_lines.append(" - echo ''")
1571
1687
  else:
1572
- runcmd_lines.append(" - echo '[5/9] 📥 No data to import'")
1688
+ runcmd_lines.append(" - echo '[6/10] 📥 No data to import'")
1573
1689
  runcmd_lines.append(" - echo ''")
1574
1690
 
1575
- # Phase 6: GUI Environment Setup
1691
+ # Phase 7: GUI Environment Setup
1576
1692
  if config.gui:
1577
- runcmd_lines.append(" - echo '[6/9] 🖥️ Setting up GUI environment...'")
1693
+ runcmd_lines.append(" - echo '[7/10] 🖥️ Setting up GUI environment...'")
1578
1694
  runcmd_lines.append(" - echo ' → Creating user directories'")
1579
1695
  # Create directories that GNOME services need
1696
+ gui_dirs = [
1697
+ f"/home/{config.username}/.config/pulse",
1698
+ f"/home/{config.username}/.cache/ibus",
1699
+ f"/home/{config.username}/.local/share",
1700
+ f"/home/{config.username}/.config/dconf",
1701
+ f"/home/{config.username}/.cache/tracker3",
1702
+ f"/home/{config.username}/.config/autostart",
1703
+ ]
1704
+ for i, d in enumerate(gui_dirs, 1):
1705
+ runcmd_lines.append(f" - mkdir -p {d} && echo ' → [{i}/{len(gui_dirs)}] Created {d}'")
1706
+
1580
1707
  runcmd_lines.extend(
1581
1708
  [
1582
- " - mkdir -p /home/ubuntu/.config/pulse /home/ubuntu/.cache/ibus /home/ubuntu/.local/share",
1583
- " - mkdir -p /home/ubuntu/.config/dconf /home/ubuntu/.cache/tracker3",
1584
- " - mkdir -p /home/ubuntu/.config/autostart",
1585
- " - chown -R 1000:1000 /home/ubuntu/.config /home/ubuntu/.cache /home/ubuntu/.local",
1586
- " - chmod 700 /home/ubuntu/.config /home/ubuntu/.cache",
1709
+ f" - chown -R 1000:1000 /home/{config.username}/.config /home/{config.username}/.cache /home/{config.username}/.local",
1710
+ f" - chmod 700 /home/{config.username}/.config /home/{config.username}/.cache",
1587
1711
  " - systemctl set-default graphical.target",
1588
1712
  " - echo ' → Starting display manager'",
1589
1713
  ]
1590
1714
  )
1591
1715
  runcmd_lines.append(" - systemctl enable --now gdm3 || systemctl enable --now gdm || true")
1592
1716
  runcmd_lines.append(" - systemctl start display-manager || true")
1593
- runcmd_lines.append(" - echo ' ✓ GUI environment ready'")
1717
+ runcmd_lines.append(" - echo ' [7/10] GUI environment ready'")
1594
1718
  runcmd_lines.append(" - echo ''")
1595
1719
  else:
1596
- runcmd_lines.append(" - echo '[6/9] 🖥️ No GUI requested'")
1720
+ runcmd_lines.append(" - echo '[7/10] 🖥️ No GUI requested'")
1597
1721
  runcmd_lines.append(" - echo ''")
1598
1722
 
1599
- runcmd_lines.append(" - chown -R 1000:1000 /home/ubuntu || true")
1600
- runcmd_lines.append(" - chown -R 1000:1000 /home/ubuntu/snap || true")
1723
+ runcmd_lines.append(f" - chown -R 1000:1000 /home/{config.username} || true")
1724
+ runcmd_lines.append(f" - chown -R 1000:1000 /home/{config.username}/snap || true")
1601
1725
 
1602
- # Phase 7: Snap packages
1726
+ # Phase 8: Snap packages
1603
1727
  if config.snap_packages:
1604
- runcmd_lines.append(f" - echo '[7/9] 📦 Installing snap packages ({len(config.snap_packages)} packages)...'")
1728
+ runcmd_lines.append(f" - echo '[8/10] 📦 Installing snap packages ({len(config.snap_packages)} packages)...'")
1729
+ # Check space before starting snap installation
1730
+ runcmd_lines.append(" - if [ $(df / --output=avail | tail -n 1) -lt 2097152 ]; then echo ' → ⚠️ WARNING: Low disk space (<2GB) before Snap install'; fi")
1605
1731
  for i, snap_pkg in enumerate(config.snap_packages, 1):
1606
1732
  runcmd_lines.append(f" - echo ' → [{i}/{len(config.snap_packages)}] {snap_pkg}'")
1607
1733
  # Try classic first, then strict, with retries
1608
1734
  cmd = (
1609
1735
  f"for i in 1 2 3; do "
1610
- f"snap install {snap_pkg} --classic && echo ' ✓ {snap_pkg} installed (classic)' && break || "
1611
- f"snap install {snap_pkg} && echo ' ✓ {snap_pkg} installed' && break || "
1612
- f"echo ' ⟳ Retry $i/3...' && sleep 10; "
1736
+ f"snap install {snap_pkg} --classic && echo '✓ {snap_pkg} installed (classic)' && break || "
1737
+ f"snap install {snap_pkg} && echo '✓ {snap_pkg} installed' && break || "
1738
+ f"{{ if [ $i -eq 3 ]; then echo ' → ❌ Failed to install {snap_pkg} after 3 attempts'; else echo ' → ⟳ Retry $i/3...' && sleep 10; fi; }} "
1613
1739
  f"done"
1614
1740
  )
1615
1741
  runcmd_lines.append(f" - {cmd}")
1616
- runcmd_lines.append(" - echo ' ✓ Snap packages installed'")
1617
- runcmd_lines.append(" - echo ''")
1618
-
1619
- # Connect snap interfaces for GUI apps (not auto-connected via cloud-init)
1620
- runcmd_lines.append(f" - echo ' 🔌 Connecting snap interfaces...'")
1621
- for snap_pkg in config.snap_packages:
1622
- runcmd_lines.append(f" - echo ' → {snap_pkg}'")
1623
- interfaces = SNAP_INTERFACES.get(snap_pkg, DEFAULT_SNAP_INTERFACES)
1624
- for iface in interfaces:
1625
- runcmd_lines.append(
1626
- f" - snap connect {snap_pkg}:{iface} :{iface} 2>/dev/null || true"
1627
- )
1628
- runcmd_lines.append(" - echo ' ✓ Snap interfaces connected'")
1629
- runcmd_lines.append(" - systemctl restart snapd || true")
1630
- runcmd_lines.append(" - echo ''")
1631
- else:
1632
- runcmd_lines.append(" - echo '[7/9] 📦 No snap packages to install'")
1633
- runcmd_lines.append(" - echo ''")
1634
-
1635
- # Add remaining GUI setup if enabled
1636
- if config.gui:
1637
- runcmd_lines.append(" - echo ' ⚙️ Creating autostart entries...'")
1638
- # Create autostart entries for GUI apps
1639
- autostart_apps = {
1640
- "pycharm-community": (
1641
- "PyCharm Community",
1642
- "/snap/bin/pycharm-community",
1643
- "pycharm-community",
1644
- ),
1645
- "firefox": ("Firefox", "/snap/bin/firefox", "firefox"),
1646
- "chromium": ("Chromium", "/snap/bin/chromium", "chromium"),
1647
- "google-chrome": ("Google Chrome", "google-chrome-stable", "google-chrome"),
1648
- }
1649
-
1650
- for snap_pkg in config.snap_packages:
1651
- if snap_pkg in autostart_apps:
1652
- name, exec_cmd, icon = autostart_apps[snap_pkg]
1653
- desktop_entry = f"""[Desktop Entry]
1654
- Type=Application
1655
- Name={name}
1656
- Exec={exec_cmd}
1657
- Icon={icon}
1658
- X-GNOME-Autostart-enabled=true
1659
- X-GNOME-Autostart-Delay=5
1660
- Comment=CloneBox autostart
1661
- """
1662
- import base64
1663
-
1664
- desktop_b64 = base64.b64encode(desktop_entry.encode()).decode()
1665
- runcmd_lines.append(
1666
- f" - echo '{desktop_b64}' | base64 -d > /home/ubuntu/.config/autostart/{snap_pkg}.desktop"
1667
- )
1668
-
1669
- # Check if google-chrome is in paths (app_data_paths)
1670
- wants_chrome = any("/google-chrome" in str(p) for p in (config.paths or {}).values())
1671
- if wants_chrome:
1672
- name, exec_cmd, icon = autostart_apps["google-chrome"]
1673
- desktop_entry = f"""[Desktop Entry]
1674
- Type=Application
1675
- Name={name}
1676
- Exec={exec_cmd}
1677
- Icon={icon}
1678
- X-GNOME-Autostart-enabled=true
1679
- X-GNOME-Autostart-Delay=5
1680
- Comment=CloneBox autostart
1681
- """
1682
- desktop_b64 = base64.b64encode(desktop_entry.encode()).decode()
1683
- runcmd_lines.append(
1684
- f" - echo '{desktop_b64}' | base64 -d > /home/ubuntu/.config/autostart/google-chrome.desktop"
1685
- )
1686
-
1687
- # Fix ownership of autostart directory
1688
- runcmd_lines.append(" - chown -R 1000:1000 /home/ubuntu/.config/autostart")
1689
- runcmd_lines.append(" - echo ' ✓ Autostart entries created'")
1690
- runcmd_lines.append(" - echo ''")
1691
-
1692
- # Phase 8: Post commands
1742
+ runcmd_lines.append(" - echo ' [8/10] Snap packages installed'")
1743
+ runcmd_lines.append(" - df -h / | sed 's/^/ → /'")
1744
+
1745
+ # Phase 9: Post commands
1693
1746
  if config.post_commands:
1694
- runcmd_lines.append(f" - echo '[8/9] ⚙️ Running post-setup commands ({len(config.post_commands)} commands)...'")
1747
+ runcmd_lines.append(f" - echo '[9/10] ⚙️ Running post-setup commands ({len(config.post_commands)} total)...'")
1695
1748
  for i, cmd in enumerate(config.post_commands, 1):
1696
1749
  # Truncate long commands for display
1697
1750
  display_cmd = cmd[:60] + '...' if len(cmd) > 60 else cmd
1698
1751
  runcmd_lines.append(f" - echo ' → [{i}/{len(config.post_commands)}] {display_cmd}'")
1699
- runcmd_lines.append(f" - {cmd}")
1700
- runcmd_lines.append(f" - echo ' ✓ Command {i} completed'")
1701
- runcmd_lines.append(" - echo ' ✓ Post-setup commands completed'")
1752
+ runcmd_lines.append(f" - {cmd} || echo ' → ❌ Command {i} failed'")
1753
+ runcmd_lines.append(f" - echo '✓ Command {i} completed'")
1754
+ runcmd_lines.append(" - echo ' [9/10] Post-setup commands finished'")
1702
1755
  runcmd_lines.append(" - echo ''")
1703
1756
  else:
1704
- runcmd_lines.append(" - echo '[8/9] ⚙️ No post-setup commands'")
1757
+ runcmd_lines.append(" - echo '[9/10] ⚙️ No post-setup commands'")
1705
1758
  runcmd_lines.append(" - echo ''")
1706
1759
 
1707
1760
  # Generate health check script
1708
1761
  health_script = self._generate_health_check_script(config)
1709
- # Phase 9: Health checks and finalization
1710
- runcmd_lines.append(" - echo '[9/9] 🏥 Running health checks...'")
1762
+ # Phase 10: Health checks and finalization
1763
+ runcmd_lines.append(" - echo '[10/10] 🏥 Running health checks and final cleanup...'")
1764
+ runcmd_lines.append(" - echo ' → Vacuuming system logs'")
1765
+ runcmd_lines.append(" - journalctl --vacuum-size=50M >/dev/null 2>&1 || true")
1766
+ runcmd_lines.append(" - echo ' → Checking final disk usage'")
1767
+ runcmd_lines.append(" - df -h / | sed 's/^/ → /'")
1768
+
1711
1769
  runcmd_lines.append(
1712
1770
  f" - echo '{health_script}' | base64 -d > /usr/local/bin/clonebox-health"
1713
1771
  )
@@ -1715,7 +1773,7 @@ Comment=CloneBox autostart
  runcmd_lines.append(
  " - /usr/local/bin/clonebox-health >> /var/log/clonebox-health.log 2>&1 || true"
  )
- runcmd_lines.append(" - echo ' ✓ Health checks completed'")
+ runcmd_lines.append(" - echo ' [10/10] Health checks completed'")
  runcmd_lines.append(" - echo 'CloneBox VM ready!' > /var/log/clonebox-ready")

  # Final status
@@ -1791,7 +1849,7 @@ set -uo pipefail

  RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' CYAN='\033[0;36m' NC='\033[0m' BOLD='\033[1m'

- show_help() {
+ show_help() {{
  echo -e "${BOLD}${CYAN}CloneBox Repair Utility${NC}"
  echo ""
  echo "Usage: clonebox-repair [OPTION]"
@@ -1809,9 +1867,10 @@ show_help() {
1809
1867
  echo " --help Show this help message"
1810
1868
  echo ""
1811
1869
  echo "Without options, shows interactive menu."
1812
- }
1870
+ }}
1813
1871
 
1814
- show_status() {
1872
+ show_status() {{
1873
+ echo ""
1815
1874
  echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
1816
1875
  echo -e "${BOLD}${CYAN} CloneBox VM Status${NC}"
1817
1876
  echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
@@ -1830,17 +1889,17 @@ show_status() {
1830
1889
  echo ""
1831
1890
  echo -e " Last boot diagnostic: $(stat -c %y /var/log/clonebox-boot.log 2>/dev/null || echo 'never')"
1832
1891
  echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
1833
- }
1892
+ }}
1834
1893
 
1835
- show_logs() {
1894
+ show_logs() {{
1836
1895
  echo -e "${BOLD}Recent repair logs:${NC}"
1837
1896
  echo ""
1838
1897
  tail -n 50 /var/log/clonebox-boot.log 2>/dev/null || echo "No logs found"
1839
- }
1898
+ }}
1840
1899
 
1841
- fix_permissions() {
1900
+ fix_permissions() {{
1842
1901
  echo -e "${CYAN}Fixing directory permissions...${NC}"
1843
- VM_USER="${SUDO_USER:-ubuntu}"
1902
+ VM_USER="${{SUDO_USER:-ubuntu}}"
1844
1903
  VM_HOME="/home/$VM_USER"
1845
1904
 
1846
1905
  DIRS_TO_CREATE=(
@@ -1858,7 +1917,7 @@ fix_permissions() {
1858
1917
  "$VM_HOME/.local/share/keyrings"
1859
1918
  )
1860
1919
 
1861
- for dir in "${DIRS_TO_CREATE[@]}"; do
1920
+ for dir in "${{DIRS_TO_CREATE[@]}}"; do
1862
1921
  if [ ! -d "$dir" ]; then
1863
1922
  mkdir -p "$dir" 2>/dev/null && echo " Created $dir"
1864
1923
  fi
@@ -1872,11 +1931,11 @@ fix_permissions() {
1872
1931
  done
1873
1932
 
1874
1933
  echo -e "${GREEN}✅ Permissions fixed${NC}"
1875
- }
1934
+ }}
1876
1935
 
1877
- fix_audio() {
1936
+ fix_audio() {{
1878
1937
  echo -e "${CYAN}Fixing audio (PulseAudio/PipeWire)...${NC}"
1879
- VM_USER="${SUDO_USER:-ubuntu}"
1938
+ VM_USER="${{SUDO_USER:-ubuntu}}"
1880
1939
  VM_HOME="/home/$VM_USER"
1881
1940
 
1882
1941
  # Create pulse config directory with correct permissions
@@ -1901,11 +1960,11 @@ fix_audio() {
1901
1960
  systemctl --user restart pipewire pipewire-pulse 2>/dev/null || true
1902
1961
 
1903
1962
  echo -e "${GREEN}✅ Audio fixed${NC}"
1904
- }
1963
+ }}
1905
1964
 
1906
- fix_keyring() {
1965
+ fix_keyring() {{
1907
1966
  echo -e "${CYAN}Resetting GNOME Keyring...${NC}"
1908
- VM_USER="${SUDO_USER:-ubuntu}"
1967
+ VM_USER="${{SUDO_USER:-ubuntu}}"
1909
1968
  VM_HOME="/home/$VM_USER"
1910
1969
  KEYRING_DIR="$VM_HOME/.local/share/keyrings"
1911
1970
 
@@ -1933,11 +1992,11 @@ fix_keyring() {
1933
1992
  pkill -u "$VM_USER" gnome-keyring-daemon 2>/dev/null || true
1934
1993
 
1935
1994
  echo -e "${GREEN}✅ Keyring reset - log out and back in to create new keyring${NC}"
1936
- }
1995
+ }}
1937
1996
 
1938
- fix_ibus() {
1997
+ fix_ibus() {{
1939
1998
  echo -e "${CYAN}Fixing IBus input method...${NC}"
1940
- VM_USER="${SUDO_USER:-ubuntu}"
1999
+ VM_USER="${{SUDO_USER:-ubuntu}}"
1941
2000
  VM_HOME="/home/$VM_USER"
1942
2001
 
1943
2002
  # Create ibus cache directory
@@ -1953,25 +2012,29 @@ fix_ibus() {
1953
2012
  fi
1954
2013
 
1955
2014
  echo -e "${GREEN}✅ IBus fixed${NC}"
1956
- }
2015
+ }}
1957
2016
 
1958
- fix_snaps() {
2017
+ fix_snaps() {{
1959
2018
  echo -e "${CYAN}Reconnecting snap interfaces...${NC}"
1960
2019
  IFACES="desktop desktop-legacy x11 wayland home network audio-playback audio-record camera opengl"
1961
2020
 
1962
2021
  for snap in $(snap list --color=never 2>/dev/null | tail -n +2 | awk '{print $1}'); do
1963
- [[ "$snap" =~ ^(core|snapd|gnome-|gtk-|mesa-) ]] && continue
1964
- echo -e " ${YELLOW}$snap${NC}"
1965
- for iface in $IFACES; do
1966
- snap connect "$snap:$iface" ":$iface" 2>/dev/null && echo " ✓ $iface" || true
1967
- done
2022
+ case "$snap" in
2023
+ pycharm-community|chromium|firefox|code|slack|spotify)
2024
+ echo "Connecting interfaces for $snap..."
2025
+ IFACES="desktop desktop-legacy x11 wayland home network network-bind audio-playback"
2026
+ for iface in $IFACES; do
2027
+ snap connect "$snap:$iface" ":$iface" 2>/dev/null || true
2028
+ done
2029
+ ;;
2030
+ esac
1968
2031
  done
1969
2032
 
1970
2033
  systemctl restart snapd 2>/dev/null || true
1971
2034
  echo -e "${GREEN}✅ Snap interfaces reconnected${NC}"
1972
- }
2035
+ }}
1973
2036
 
1974
- fix_mounts() {
2037
+ fix_mounts() {{
1975
2038
  echo -e "${CYAN}Remounting filesystems...${NC}"
1976
2039
 
1977
2040
  while IFS= read -r line; do
@@ -1992,9 +2055,9 @@ fix_mounts() {
1992
2055
  done < /etc/fstab
1993
2056
 
1994
2057
  echo -e "${GREEN}✅ Mounts checked${NC}"
1995
- }
2058
+ }}
1996
2059
 
1997
- fix_all() {
2060
+ fix_all() {{
1998
2061
  echo -e "${BOLD}${CYAN}Running all fixes...${NC}"
1999
2062
  echo ""
2000
2063
  fix_permissions
@@ -2008,9 +2071,9 @@ fix_all() {
2008
2071
  fix_mounts
2009
2072
  echo ""
2010
2073
  echo -e "${BOLD}${GREEN}All fixes completed!${NC}"
2011
- }
2074
+ }}
2012
2075
 
2013
- interactive_menu() {
2076
+ interactive_menu() {{
2014
2077
  while true; do
2015
2078
  echo ""
2016
2079
  echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
@@ -2044,7 +2107,7 @@ interactive_menu() {
2044
2107
  *) echo -e "${RED}Invalid option${NC}" ;;
2045
2108
  esac
2046
2109
  done
2047
- }
2110
+ }}
2048
2111
 
2049
2112
  # Main
2050
2113
  case "${1:-}" in
@@ -2454,32 +2517,28 @@ if __name__ == "__main__":
  # Note: The bash monitor is already installed above, no need to install Python monitor

  # Create logs disk for host access
- # Use different paths based on session type
- if user_session:
- logs_disk_path = str(Path.home() / ".local/share/libvirt/images/clonebox-logs.qcow2")
- else:
- logs_disk_path = "/var/lib/libvirt/images/clonebox-logs.qcow2"
+ # Inside the VM, we use a fixed path for the image file
+ vm_logs_img_path = "/var/lib/clonebox/logs.img"

  runcmd_lines.extend(
  [
- " - mkdir -p /mnt/logs",
- f" - truncate -s 1G {logs_disk_path}",
- f" - mkfs.ext4 -F {logs_disk_path}",
- f" - echo '{logs_disk_path} /mnt/logs ext4 loop,defaults 0 0' >> /etc/fstab",
- " - mount -a",
+ " - mkdir -p /var/lib/clonebox /mnt/logs",
+ f" - truncate -s 1G {vm_logs_img_path}",
+ f" - mkfs.ext4 -F {vm_logs_img_path} >/dev/null 2>&1",
+ f" - echo '{vm_logs_img_path} /mnt/logs ext4 loop,defaults 0 0' >> /etc/fstab",
+ " - mount /mnt/logs || echo ' → ❌ Failed to mount logs disk'",
  " - mkdir -p /mnt/logs/var/log",
  " - mkdir -p /mnt/logs/tmp",
  " - cp -r /var/log/clonebox*.log /mnt/logs/var/log/ 2>/dev/null || true",
  " - cp -r /tmp/*-error.log /mnt/logs/tmp/ 2>/dev/null || true",
- f" - echo 'Logs disk mounted at /mnt/logs - accessible from host as {logs_disk_path}'",
- f" - \"echo 'To view logs on host: sudo mount -o loop {logs_disk_path} /mnt/clonebox-logs'\"",
+ f" - echo ' → ✓ Logs disk mounted at /mnt/logs'",
  ]
  )

  # Add reboot command at the end if GUI is enabled
  if config.gui:
- runcmd_lines.append(" - echo '🔄 Rebooting in 10 seconds to start GUI...'")
- runcmd_lines.append(" - echo ' (After reboot, GUI will auto-start)'")
+ runcmd_lines.append(" - echo '🔄 Rebooting in 10 seconds to start GUI...'")
+ runcmd_lines.append(" - echo '(After reboot, GUI will auto-start)'")
  runcmd_lines.append(" - sleep 10 && reboot")

  runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
@@ -2581,12 +2640,17 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
  log("[green]✅ VM started![/]")

  if open_viewer:
- log("[cyan]🖥️ Opening virt-viewer...[/]")
- subprocess.Popen(
- ["virt-viewer", "-c", self.conn_uri, vm_name],
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL,
- )
+ import shutil
+ if shutil.which("virt-viewer"):
+ log("[cyan]🖥️ Opening virt-viewer...[/]")
+ subprocess.Popen(
+ ["virt-viewer", "-c", self.conn_uri, vm_name],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ else:
+ log("[yellow]⚠️ Warning: 'virt-viewer' not found. Cannot open console automatically.[/]")
+ log("[dim] Install it with: sudo apt install virt-viewer[/]")

  return True

@@ -2602,6 +2666,8 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
  try:
  vm = self.conn.lookupByName(vm_name)
  except libvirt.libvirtError:
+ if ignore_not_found:
+ return True
  log(f"[red]❌ VM '{vm_name}' not found[/]")
  return False

@@ -2649,6 +2715,7 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
  delete_storage: bool = True,
  console=None,
  ignore_not_found: bool = False,
+ approved: bool = False,
  ) -> bool:
  """Delete a VM and optionally its storage."""

@@ -2658,9 +2725,18 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
  else:
  print(msg)

+ policy = PolicyEngine.load_effective()
+ if policy is not None:
+ policy.assert_operation_approved(
+ AuditEventType.VM_DELETE.value,
+ approved=approved,
+ )
+
  try:
  vm = self.conn.lookupByName(vm_name)
  except libvirt.libvirtError:
+ if ignore_not_found:
+ return True
  log(f"[red]❌ VM '{vm_name}' not found[/]")
  return False
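Taken together, destructive operations are now double-gated in this release: `delete_vm()` re-checks the policy itself, and `create_vm(replace=True)` checks it again before delegating, both keyed on `AuditEventType.VM_DELETE` and an explicit `approved` flag. The `assert_operation_approved()` implementation is not part of this diff; a hypothetical guard with the same call contract might look like:

```python
# Hypothetical approval gate with the same contract as
# PolicyEngine.assert_operation_approved(); the real implementation in
# clonebox.policies is not shown in this diff and may differ.
class PolicyViolationError(Exception):
    pass

class ApprovalPolicy:
    def __init__(self, operations_requiring_approval):
        self.operations_requiring_approval = set(operations_requiring_approval)

    def assert_operation_approved(self, operation: str, approved: bool = False) -> None:
        if operation in self.operations_requiring_approval and not approved:
            raise PolicyViolationError(
                f"Operation '{operation}' requires explicit approval (pass approved=True)"
            )

policy = ApprovalPolicy({"vm_delete"})
policy.assert_operation_approved("vm_delete", approved=True)   # ok
# policy.assert_operation_approved("vm_delete")                # would raise PolicyViolationError
```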