clonebox 1.1.14__py3-none-any.whl → 1.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of clonebox might be problematic; consult the registry's advisory page for more details.

clonebox/cloner.py CHANGED
@@ -35,6 +35,7 @@ from clonebox.logging import get_logger, log_operation
35
35
  from clonebox.resources import ResourceLimits
36
36
  from clonebox.rollback import vm_creation_transaction
37
37
  from clonebox.secrets import SecretsManager, SSHKeyPair
38
+ from clonebox.audit import get_audit_logger, AuditEventType, AuditOutcome
38
39
 
39
40
  log = get_logger(__name__)
40
41
 
@@ -365,121 +366,131 @@ class SelectiveVMCloner:
365
366
  Returns:
366
367
  UUID of created VM
367
368
  """
368
- with log_operation(
369
- log, "vm.create", vm_name=config.name, ram_mb=config.ram_mb
370
- ):
371
- with vm_creation_transaction(self, config, console) as ctx:
372
- # If VM already exists, optionally replace it
373
- existing_vm = None
374
- try:
375
- candidate_vm = self.conn.lookupByName(config.name)
376
- if candidate_vm is not None:
377
- try:
378
- if hasattr(candidate_vm, "name") and callable(candidate_vm.name):
379
- if candidate_vm.name() == config.name:
369
+ audit = get_audit_logger()
370
+ with audit.operation(
371
+ AuditEventType.VM_CREATE,
372
+ target_type="vm",
373
+ target_name=config.name,
374
+ ) as audit_ctx:
375
+ audit_ctx.add_detail("ram_mb", config.ram_mb)
376
+ audit_ctx.add_detail("vcpus", config.vcpus)
377
+ audit_ctx.add_detail("disk_size_gb", config.disk_size_gb)
378
+
379
+ with log_operation(
380
+ log, "vm.create", vm_name=config.name, ram_mb=config.ram_mb
381
+ ):
382
+ with vm_creation_transaction(self, config, console) as ctx:
383
+ # If VM already exists, optionally replace it
384
+ existing_vm = None
385
+ try:
386
+ candidate_vm = self.conn.lookupByName(config.name)
387
+ if candidate_vm is not None:
388
+ try:
389
+ if hasattr(candidate_vm, "name") and callable(candidate_vm.name):
390
+ if candidate_vm.name() == config.name:
391
+ existing_vm = candidate_vm
392
+ else:
380
393
  existing_vm = candidate_vm
381
- else:
394
+ except Exception:
382
395
  existing_vm = candidate_vm
383
- except Exception:
384
- existing_vm = candidate_vm
385
- except Exception:
386
- existing_vm = None
387
-
388
- if existing_vm is not None:
389
- if not replace:
390
- raise RuntimeError(
391
- f"VM '{config.name}' already exists.\n\n"
396
+ except Exception:
397
+ existing_vm = None
398
+
399
+ if existing_vm is not None:
400
+ if not replace:
401
+ raise RuntimeError(
402
+ f"VM '{config.name}' already exists.\n\n"
403
+ f"🔧 Solutions:\n"
404
+ f" 1. Reuse existing VM: clonebox start {config.name}\n"
405
+ f" 2. Replace it: clonebox clone . --name {config.name} --replace\n"
406
+ f" 3. Delete it: clonebox delete {config.name}\n"
407
+ )
408
+
409
+ log.info(f"VM '{config.name}' already exists - replacing...")
410
+ self.delete_vm(config.name, delete_storage=True, console=console, ignore_not_found=True)
411
+
412
+ # Determine images directory
413
+ images_dir = self.get_images_dir()
414
+ try:
415
+ vm_dir = ctx.add_directory(images_dir / config.name)
416
+ vm_dir.mkdir(parents=True, exist_ok=True)
417
+ except PermissionError as e:
418
+ raise PermissionError(
419
+ f"Cannot create VM directory: {images_dir / config.name}\n\n"
392
420
  f"🔧 Solutions:\n"
393
- f" 1. Reuse existing VM: clonebox start {config.name}\n"
394
- f" 2. Replace it: clonebox clone . --name {config.name} --replace\n"
395
- f" 3. Delete it: clonebox delete {config.name}\n"
396
- )
397
-
398
- log.info(f"VM '{config.name}' already exists - replacing...")
399
- self.delete_vm(config.name, delete_storage=True, console=console, ignore_not_found=True)
400
-
401
- # Determine images directory
402
- images_dir = self.get_images_dir()
403
- try:
404
- vm_dir = ctx.add_directory(images_dir / config.name)
405
- vm_dir.mkdir(parents=True, exist_ok=True)
406
- except PermissionError as e:
407
- raise PermissionError(
408
- f"Cannot create VM directory: {images_dir / config.name}\n\n"
409
- f"🔧 Solutions:\n"
410
- f" 1. Use --user flag to run in user session (recommended):\n"
411
- f" clonebox clone . --user\n\n"
412
- f" 2. Run with sudo (not recommended):\n"
413
- f" sudo clonebox clone .\n\n"
414
- f" 3. Fix directory permissions:\n"
415
- f" sudo mkdir -p {images_dir}\n"
416
- f" sudo chown -R $USER:libvirt {images_dir}\n\n"
417
- f"Original error: {e}"
418
- ) from e
419
-
420
- # Create root disk
421
- root_disk = ctx.add_file(vm_dir / "root.qcow2")
422
-
423
- if not config.base_image:
424
- config.base_image = str(self._ensure_default_base_image(console=console))
425
-
426
- if config.base_image and Path(config.base_image).exists():
427
- # Use backing file for faster creation
428
- log.debug(f"Creating disk with backing file: {config.base_image}")
429
- cmd = [
430
- "qemu-img",
431
- "create",
432
- "-f",
433
- "qcow2",
434
- "-b",
435
- config.base_image,
436
- "-F",
437
- "qcow2",
438
- str(root_disk),
439
- f"{config.disk_size_gb}G",
440
- ]
441
- else:
442
- # Create empty disk
443
- log.debug(f"Creating empty {config.disk_size_gb}GB disk...")
444
- cmd = ["qemu-img", "create", "-f", "qcow2", str(root_disk), f"{config.disk_size_gb}G"]
445
-
446
- subprocess.run(cmd, check=True, capture_output=True)
447
-
448
- # Create cloud-init ISO if packages/services specified
449
- cloudinit_iso = None
450
- if (
451
- config.packages
452
- or config.services
453
- or config.snap_packages
454
- or config.post_commands
455
- or config.gui
456
- ):
457
- cloudinit_iso = ctx.add_file(self._create_cloudinit_iso(vm_dir, config))
458
- log.info(f"Created cloud-init ISO with {len(config.packages)} packages")
459
-
460
- # Generate VM XML
461
- vm_xml = self._generate_vm_xml(config, root_disk, cloudinit_iso)
462
- ctx.add_libvirt_domain(self.conn, config.name)
463
-
464
- # Define VM
465
- log.info(f"Defining VM '{config.name}'...")
466
- try:
467
- vm = self.conn.defineXML(vm_xml)
468
- except Exception as e:
469
- raise RuntimeError(
470
- f"Failed to define VM '{config.name}'.\n"
471
- f"Error: {e}\n\n"
472
- f"If the VM already exists, try: clonebox clone . --name {config.name} --replace\n"
473
- ) from e
421
+ f" 1. Use --user flag to run in user session (recommended):\n"
422
+ f" clonebox clone . --user\n\n"
423
+ f" 2. Run with sudo (not recommended):\n"
424
+ f" sudo clonebox clone .\n\n"
425
+ f" 3. Fix directory permissions:\n"
426
+ f" sudo mkdir -p {images_dir}\n"
427
+ f" sudo chown -R $USER:libvirt {images_dir}\n\n"
428
+ f"Original error: {e}"
429
+ ) from e
430
+
431
+ # Create root disk
432
+ root_disk = ctx.add_file(vm_dir / "root.qcow2")
433
+
434
+ if not config.base_image:
435
+ config.base_image = str(self._ensure_default_base_image(console=console))
436
+
437
+ if config.base_image and Path(config.base_image).exists():
438
+ # Use backing file for faster creation
439
+ log.debug(f"Creating disk with backing file: {config.base_image}")
440
+ cmd = [
441
+ "qemu-img",
442
+ "create",
443
+ "-f",
444
+ "qcow2",
445
+ "-b",
446
+ config.base_image,
447
+ "-F",
448
+ "qcow2",
449
+ str(root_disk),
450
+ f"{config.disk_size_gb}G",
451
+ ]
452
+ else:
453
+ # Create empty disk
454
+ log.debug(f"Creating empty {config.disk_size_gb}GB disk...")
455
+ cmd = ["qemu-img", "create", "-f", "qcow2", str(root_disk), f"{config.disk_size_gb}G"]
456
+
457
+ subprocess.run(cmd, check=True, capture_output=True)
458
+
459
+ # Create cloud-init ISO if packages/services specified
460
+ cloudinit_iso = None
461
+ if (
462
+ config.packages
463
+ or config.services
464
+ or config.snap_packages
465
+ or config.post_commands
466
+ or config.gui
467
+ ):
468
+ cloudinit_iso = ctx.add_file(self._create_cloudinit_iso(vm_dir, config, self.user_session))
469
+ log.info(f"Created cloud-init ISO with {len(config.packages)} packages")
470
+
471
+ # Generate VM XML
472
+ vm_xml = self._generate_vm_xml(config, root_disk, cloudinit_iso)
473
+ ctx.add_libvirt_domain(self.conn, config.name)
474
+
475
+ # Define VM
476
+ log.info(f"Defining VM '{config.name}'...")
477
+ try:
478
+ vm = self.conn.defineXML(vm_xml)
479
+ except Exception as e:
480
+ raise RuntimeError(
481
+ f"Failed to define VM '{config.name}'.\n"
482
+ f"Error: {e}\n\n"
483
+ f"If the VM already exists, try: clonebox clone . --name {config.name} --replace\n"
484
+ ) from e
474
485
 
475
- # Start if autostart requested
476
- if getattr(config, "autostart", False):
477
- self.start_vm(config.name, open_viewer=True)
486
+ # Start if autostart requested
487
+ if getattr(config, "autostart", False):
488
+ self.start_vm(config.name, open_viewer=True)
478
489
 
479
- # All good - commit transaction
480
- ctx.commit()
490
+ # All good - commit transaction
491
+ ctx.commit()
481
492
 
482
- return vm.UUIDString()
493
+ return vm.UUIDString()
483
494
 
484
495
  def _generate_vm_xml(
485
496
  self, config: VMConfig = None, root_disk: Path = None, cloudinit_iso: Optional[Path] = None
@@ -706,6 +717,9 @@ class SelectiveVMCloner:
706
717
  for snap, ifaces in SNAP_INTERFACES.items()
707
718
  )
708
719
 
720
+ mount_points_bash = "\n".join(str(p) for p in (config.paths or {}).values())
721
+ copy_paths_bash = "\n".join(str(p) for p in (config.copy_paths or {}).values())
722
+
709
723
  script = f"""#!/bin/bash
710
724
  set -uo pipefail
711
725
  LOG="/var/log/clonebox-boot.log"
@@ -1000,12 +1014,23 @@ for app in "${{APPS_TO_TEST[@]}}"; do
1000
1014
  fi
1001
1015
  done
1002
1016
 
1003
- section "5/7" "Checking mount points..."
1004
- write_status "checking_mounts" "checking mount points"
1005
- while IFS= read -r line; do
1006
- tag=$(echo "$line" | awk '{{print $1}}')
1007
- mp=$(echo "$line" | awk '{{print $2}}')
1008
- if [[ "$tag" =~ ^mount[0-9]+$ ]] && [[ "$mp" == /* ]]; then
1017
+ section "5/7" "Checking mounts & imported paths..."
1018
+ write_status "checking_mounts" "checking mounts & imported paths"
1019
+
1020
+ MOUNT_POINTS=$(cat <<'EOF'
1021
+ {mount_points_bash}
1022
+ EOF
1023
+ )
1024
+
1025
+ COPIED_PATHS=$(cat <<'EOF'
1026
+ {copy_paths_bash}
1027
+ EOF
1028
+ )
1029
+
1030
+ # Bind mounts (shared live)
1031
+ if [ -n "$(echo "$MOUNT_POINTS" | tr -d '[:space:]')" ]; then
1032
+ while IFS= read -r mp; do
1033
+ [ -z "$mp" ] && continue
1009
1034
  if mountpoint -q "$mp" 2>/dev/null; then
1010
1035
  ok "$mp mounted"
1011
1036
  else
@@ -1018,8 +1043,28 @@ while IFS= read -r line; do
1018
1043
  fail "$mp mount FAILED"
1019
1044
  fi
1020
1045
  fi
1021
- fi
1022
- done < /etc/fstab
1046
+ done <<< "$MOUNT_POINTS"
1047
+ else
1048
+ log " (no bind mounts configured)"
1049
+ fi
1050
+
1051
+ # Imported/copied paths (one-time import)
1052
+ if [ -n "$(echo "$COPIED_PATHS" | tr -d '[:space:]')" ]; then
1053
+ while IFS= read -r p; do
1054
+ [ -z "$p" ] && continue
1055
+ if [ -d "$p" ]; then
1056
+ if [ "$(ls -A "$p" 2>/dev/null | wc -l)" -gt 0 ]; then
1057
+ ok "$p copied"
1058
+ else
1059
+ ok "$p copied (empty)"
1060
+ fi
1061
+ else
1062
+ fail "$p missing (copy)"
1063
+ fi
1064
+ done <<< "$COPIED_PATHS"
1065
+ else
1066
+ log " (no copied paths configured)"
1067
+ fi
1023
1068
 
1024
1069
  section "6/7" "Checking services..."
1025
1070
  write_status "checking_services" "checking services"
@@ -1077,6 +1122,12 @@ fi
1077
1122
  mount_checks = []
1078
1123
  for idx, (host_path, guest_path) in enumerate(config.paths.items()):
1079
1124
  mount_checks.append(f'check_mount "{guest_path}" "mount{idx}"')
1125
+
1126
+ # Add copied paths checks
1127
+ copy_paths = config.copy_paths or config.app_data_paths
1128
+ if copy_paths:
1129
+ for idx, (host_path, guest_path) in enumerate(copy_paths.items()):
1130
+ mount_checks.append(f'check_copy_path "{guest_path}"')
1080
1131
 
1081
1132
  apt_checks_str = "\n".join(apt_checks) if apt_checks else "echo 'No apt packages to check'"
1082
1133
  snap_checks_str = (
@@ -1183,6 +1234,30 @@ check_mount() {{
1183
1234
  log "[INFO] Mount point '$path' does not exist yet"
1184
1235
  return 0
1185
1236
  fi
1237
+ }
1238
+
1239
+ check_copy_path() {
1240
+ local path="$1"
1241
+ if [ -d "$path" ]; then
1242
+ if [ "$(ls -A "$path" 2>/dev/null | wc -l)" -gt 0 ]; then
1243
+ log "[PASS] Path '$path' exists and contains data"
1244
+ ((PASSED++))
1245
+ return 0
1246
+ else
1247
+ log "[WARN] Path '$path' exists but is EMPTY"
1248
+ ((WARNINGS++))
1249
+ return 1
1250
+ fi
1251
+ else
1252
+ if [ $SETUP_IN_PROGRESS -eq 1 ]; then
1253
+ log "[INFO] Path '$path' not imported yet"
1254
+ return 0
1255
+ else
1256
+ log "[FAIL] Path '$path' MISSING"
1257
+ ((FAILED++))
1258
+ return 1
1259
+ fi
1260
+ fi
1186
1261
  }}
1187
1262
 
1188
1263
  check_gui() {{
@@ -1262,7 +1337,7 @@ fi
1262
1337
  encoded = base64.b64encode(script.encode()).decode()
1263
1338
  return encoded
1264
1339
 
1265
- def _create_cloudinit_iso(self, vm_dir: Path, config: VMConfig) -> Path:
1340
+ def _create_cloudinit_iso(self, vm_dir: Path, config: VMConfig, user_session: bool = False) -> Path:
1266
1341
  """Create cloud-init ISO with secure credential handling."""
1267
1342
  secrets_mgr = SecretsManager()
1268
1343
 
@@ -1287,6 +1362,17 @@ fi
1287
1362
  ssh_authorized_keys = [key_pair.public_key]
1288
1363
  log.info(f"SSH key generated and saved to: {ssh_key_path}")
1289
1364
 
1365
+ local_password = getattr(config, "password", None)
1366
+ if getattr(config, "gui", False) and local_password:
1367
+ chpasswd_config = (
1368
+ "chpasswd:\n"
1369
+ " list: |\n"
1370
+ f" {config.username}:{local_password}\n"
1371
+ " expire: False"
1372
+ )
1373
+ lock_passwd = "false"
1374
+ ssh_pwauth = "true"
1375
+
1290
1376
  elif auth_method == "one_time_password":
1291
1377
  otp, chpasswd_raw = SecretsManager.generate_one_time_password()
1292
1378
  chpasswd_config = chpasswd_raw
@@ -1356,37 +1442,38 @@ fi
1356
1442
 
1357
1443
  # Handle copy_paths (import then copy)
1358
1444
  all_copy_paths = dict(config.copy_paths) if config.copy_paths else {}
1359
- for idx, (host_path, guest_path) in enumerate(all_copy_paths.items()):
1360
- if Path(host_path).exists():
1361
- tag = f"import{idx}"
1362
- temp_mount_point = f"/mnt/import{idx}"
1363
- # Use regular mount options
1364
- mount_opts = "trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000"
1365
-
1366
- # 1. Create temp mount point
1367
- mount_commands.append(f" - mkdir -p {temp_mount_point}")
1368
-
1369
- # 2. Mount the 9p share
1370
- mount_commands.append(f" - mount -t 9p -o {mount_opts} {tag} {temp_mount_point} || true")
1371
-
1372
- # 3. Ensure target directory exists and permissions are prepared
1373
- if str(guest_path).startswith("/home/ubuntu/"):
1374
- mount_commands.append(f" - mkdir -p {guest_path}")
1375
- mount_commands.append(f" - chown 1000:1000 {guest_path}")
1376
- else:
1377
- mount_commands.append(f" - mkdir -p {guest_path}")
1445
+ existing_copy_paths = {h: g for h, g in all_copy_paths.items() if Path(h).exists()}
1446
+
1447
+ for idx, (host_path, guest_path) in enumerate(existing_copy_paths.items()):
1448
+ tag = f"import{idx}"
1449
+ temp_mount_point = f"/mnt/import{idx}"
1450
+ # Use regular mount options
1451
+ mount_opts = "trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000"
1452
+
1453
+ # 1. Create temp mount point
1454
+ mount_commands.append(f" - mkdir -p {temp_mount_point}")
1455
+
1456
+ # 2. Mount the 9p share
1457
+ mount_commands.append(f" - mount -t 9p -o {mount_opts} {tag} {temp_mount_point} || true")
1458
+
1459
+ # 3. Ensure target directory exists and permissions are prepared
1460
+ if str(guest_path).startswith("/home/ubuntu/"):
1461
+ mount_commands.append(f" - mkdir -p {guest_path}")
1462
+ mount_commands.append(f" - chown 1000:1000 {guest_path}")
1463
+ else:
1464
+ mount_commands.append(f" - mkdir -p {guest_path}")
1378
1465
 
1379
- # 4. Copy contents (cp -rT to copy contents of source to target)
1380
- # We use || true to ensure boot continues even if copy fails
1381
- mount_commands.append(f" - echo 'Importing {host_path} to {guest_path}...'")
1382
- mount_commands.append(f" - cp -rT {temp_mount_point} {guest_path} || true")
1383
-
1384
- # 5. Fix ownership recursively
1385
- mount_commands.append(f" - chown -R 1000:1000 {guest_path}")
1466
+ # 4. Copy contents (cp -rT to copy contents of source to target)
1467
+ # We use || true to ensure boot continues even if copy fails
1468
+ mount_commands.append(f" - echo 'Importing {host_path} to {guest_path}...'")
1469
+ mount_commands.append(f" - cp -rT {temp_mount_point} {guest_path} || true")
1470
+
1471
+ # 5. Fix ownership recursively
1472
+ mount_commands.append(f" - chown -R 1000:1000 {guest_path}")
1386
1473
 
1387
- # 6. Unmount and cleanup
1388
- mount_commands.append(f" - umount {temp_mount_point} || true")
1389
- mount_commands.append(f" - rmdir {temp_mount_point} || true")
1474
+ # 6. Unmount and cleanup
1475
+ mount_commands.append(f" - umount {temp_mount_point} || true")
1476
+ mount_commands.append(f" - rmdir {temp_mount_point} || true")
1390
1477
 
1391
1478
  # User-data
1392
1479
  # Add desktop environment if GUI is enabled
@@ -1405,31 +1492,82 @@ fi
1405
1492
  # Build runcmd - services, mounts, snaps, post_commands
1406
1493
  runcmd_lines = []
1407
1494
 
1495
+ # Add detailed logging header
1496
+ runcmd_lines.append(" - echo '═══════════════════════════════════════════════════════════'")
1497
+ runcmd_lines.append(" - echo ' CloneBox VM Installation Progress'")
1498
+ runcmd_lines.append(" - echo '═══════════════════════════════════════════════════════════'")
1499
+ runcmd_lines.append(" - echo ''")
1500
+
1501
+ # Phase 1: APT Packages
1502
+ if all_packages:
1503
+ runcmd_lines.append(f" - echo '[1/9] 📦 Installing APT packages ({len(all_packages)} total)...'")
1504
+ runcmd_lines.append(" - export DEBIAN_FRONTEND=noninteractive")
1505
+ runcmd_lines.append(" - apt-get update")
1506
+ for i, pkg in enumerate(all_packages, 1):
1507
+ runcmd_lines.append(f" - echo ' → [{i}/{len(all_packages)}] Installing {pkg}...'")
1508
+ runcmd_lines.append(f" - apt-get install -y {pkg} || echo ' ⚠️ Failed to install {pkg}'")
1509
+ runcmd_lines.append(" - echo ' ✓ APT packages installed'")
1510
+ runcmd_lines.append(" - echo ''")
1511
+ else:
1512
+ runcmd_lines.append(" - echo '[1/9] 📦 No APT packages to install'")
1513
+ runcmd_lines.append(" - echo ''")
1514
+
1515
+ # Phase 2: Core services
1516
+ runcmd_lines.append(" - echo '[2/9] 🔧 Enabling core services...'")
1517
+ runcmd_lines.append(" - echo ' → qemu-guest-agent'")
1408
1518
  runcmd_lines.append(" - systemctl enable --now qemu-guest-agent || true")
1519
+ runcmd_lines.append(" - echo ' → snapd'")
1409
1520
  runcmd_lines.append(" - systemctl enable --now snapd || true")
1521
+ runcmd_lines.append(" - echo ' → Waiting for snap system seed...'")
1410
1522
  runcmd_lines.append(" - timeout 300 snap wait system seed.loaded || true")
1523
+ runcmd_lines.append(" - echo ' ✓ Core services enabled'")
1524
+ runcmd_lines.append(" - echo ''")
1411
1525
 
1412
- # Add service enablement
1413
- for svc in config.services:
1526
+ # Phase 3: User services
1527
+ runcmd_lines.append(f" - echo '[3/9] 🔧 Enabling user services ({len(config.services)} total)...'")
1528
+ for i, svc in enumerate(config.services, 1):
1529
+ runcmd_lines.append(f" - echo ' → [{i}/{len(config.services)}] {svc}'")
1414
1530
  runcmd_lines.append(f" - systemctl enable --now {svc} || true")
1531
+ runcmd_lines.append(" - echo ' ✓ User services enabled'")
1532
+ runcmd_lines.append(" - echo ''")
1415
1533
 
1416
- # Add fstab entries for persistent mounts after reboot
1534
+ # Phase 4: Filesystem mounts
1535
+ runcmd_lines.append(f" - echo '[4/9] 📁 Mounting shared directories ({len(fstab_entries)} mounts)...'")
1417
1536
  if fstab_entries:
1418
1537
  runcmd_lines.append(
1419
1538
  " - grep -q '^# CloneBox 9p mounts' /etc/fstab || echo '# CloneBox 9p mounts' >> /etc/fstab"
1420
1539
  )
1421
- for entry in fstab_entries:
1540
+ for i, entry in enumerate(fstab_entries, 1):
1541
+ mount_point = entry.split()[1] if len(entry.split()) > 1 else entry
1542
+ runcmd_lines.append(f" - echo ' → [{i}/{len(fstab_entries)}] {mount_point}'")
1422
1543
  runcmd_lines.append(
1423
1544
  f" - grep -qF \"{entry}\" /etc/fstab || echo '{entry}' >> /etc/fstab"
1424
1545
  )
1425
1546
  runcmd_lines.append(" - mount -a || true")
1547
+ runcmd_lines.append(" - echo ' ✓ Mounts configured'")
1548
+ runcmd_lines.append(" - echo ''")
1549
+
1550
+ # Phase 5: Data Import (copied paths)
1551
+ if existing_copy_paths:
1552
+ runcmd_lines.append(f" - echo '[5/9] 📥 Importing data ({len(existing_copy_paths)} paths)...'")
1553
+ # Add mounts (immediate, before reboot)
1554
+ import_count = 0
1555
+ for cmd in mount_commands:
1556
+ if "Importing" in cmd:
1557
+ import_count += 1
1558
+ runcmd_lines.append(cmd.replace("Importing", f" → [{import_count}/{len(existing_copy_paths)}] Importing"))
1559
+ else:
1560
+ runcmd_lines.append(cmd)
1561
+ runcmd_lines.append(" - echo ' ✓ Data import completed'")
1562
+ runcmd_lines.append(" - echo ''")
1563
+ else:
1564
+ runcmd_lines.append(" - echo '[5/9] 📥 No data to import'")
1565
+ runcmd_lines.append(" - echo ''")
1426
1566
 
1427
- # Add mounts (immediate, before reboot)
1428
- for cmd in mount_commands:
1429
- runcmd_lines.append(cmd)
1430
-
1431
- # Create user directories with correct permissions EARLY to avoid race conditions with GDM
1567
+ # Phase 6: GUI Environment Setup
1432
1568
  if config.gui:
1569
+ runcmd_lines.append(" - echo '[6/9] 🖥️ Setting up GUI environment...'")
1570
+ runcmd_lines.append(" - echo ' → Creating user directories'")
1433
1571
  # Create directories that GNOME services need
1434
1572
  runcmd_lines.extend(
1435
1573
  [
@@ -1439,41 +1577,56 @@ fi
1439
1577
  " - chown -R 1000:1000 /home/ubuntu/.config /home/ubuntu/.cache /home/ubuntu/.local",
1440
1578
  " - chmod 700 /home/ubuntu/.config /home/ubuntu/.cache",
1441
1579
  " - systemctl set-default graphical.target",
1442
- " - systemctl enable --now gdm3 || systemctl enable --now gdm || true",
1443
- " - systemctl start display-manager || true",
1580
+ " - echo ' → Starting display manager'",
1444
1581
  ]
1445
1582
  )
1583
+ runcmd_lines.append(" - systemctl enable --now gdm3 || systemctl enable --now gdm || true")
1584
+ runcmd_lines.append(" - systemctl start display-manager || true")
1585
+ runcmd_lines.append(" - echo ' ✓ GUI environment ready'")
1586
+ runcmd_lines.append(" - echo ''")
1587
+ else:
1588
+ runcmd_lines.append(" - echo '[6/9] 🖥️ No GUI requested'")
1589
+ runcmd_lines.append(" - echo ''")
1446
1590
 
1447
1591
  runcmd_lines.append(" - chown -R 1000:1000 /home/ubuntu || true")
1448
1592
  runcmd_lines.append(" - chown -R 1000:1000 /home/ubuntu/snap || true")
1449
1593
 
1450
- # Install snap packages (with retry logic)
1594
+ # Phase 7: Snap packages
1451
1595
  if config.snap_packages:
1452
- runcmd_lines.append(" - echo 'Installing snap packages...'")
1453
- for snap_pkg in config.snap_packages:
1596
+ runcmd_lines.append(f" - echo '[7/9] 📦 Installing snap packages ({len(config.snap_packages)} packages)...'")
1597
+ for i, snap_pkg in enumerate(config.snap_packages, 1):
1598
+ runcmd_lines.append(f" - echo ' → [{i}/{len(config.snap_packages)}] {snap_pkg}'")
1454
1599
  # Try classic first, then strict, with retries
1455
1600
  cmd = (
1456
1601
  f"for i in 1 2 3; do "
1457
- f"snap install {snap_pkg} --classic && break || "
1458
- f"snap install {snap_pkg} && break || "
1459
- f"sleep 10; "
1602
+ f"snap install {snap_pkg} --classic && echo ' ✓ {snap_pkg} installed (classic)' && break || "
1603
+ f"snap install {snap_pkg} && echo ' ✓ {snap_pkg} installed' && break || "
1604
+ f"echo ' ⟳ Retry $i/3...' && sleep 10; "
1460
1605
  f"done"
1461
1606
  )
1462
1607
  runcmd_lines.append(f" - {cmd}")
1608
+ runcmd_lines.append(" - echo ' ✓ Snap packages installed'")
1609
+ runcmd_lines.append(" - echo ''")
1463
1610
 
1464
1611
  # Connect snap interfaces for GUI apps (not auto-connected via cloud-init)
1465
- runcmd_lines.append(" - echo 'Connecting snap interfaces...'")
1612
+ runcmd_lines.append(f" - echo ' 🔌 Connecting snap interfaces...'")
1466
1613
  for snap_pkg in config.snap_packages:
1614
+ runcmd_lines.append(f" - echo ' → {snap_pkg}'")
1467
1615
  interfaces = SNAP_INTERFACES.get(snap_pkg, DEFAULT_SNAP_INTERFACES)
1468
1616
  for iface in interfaces:
1469
1617
  runcmd_lines.append(
1470
1618
  f" - snap connect {snap_pkg}:{iface} :{iface} 2>/dev/null || true"
1471
1619
  )
1472
-
1620
+ runcmd_lines.append(" - echo ' ✓ Snap interfaces connected'")
1473
1621
  runcmd_lines.append(" - systemctl restart snapd || true")
1622
+ runcmd_lines.append(" - echo ''")
1623
+ else:
1624
+ runcmd_lines.append(" - echo '[7/9] 📦 No snap packages to install'")
1625
+ runcmd_lines.append(" - echo ''")
1474
1626
 
1475
1627
  # Add remaining GUI setup if enabled
1476
1628
  if config.gui:
1629
+ runcmd_lines.append(" - echo ' ⚙️ Creating autostart entries...'")
1477
1630
  # Create autostart entries for GUI apps
1478
1631
  autostart_apps = {
1479
1632
  "pycharm-community": (
@@ -1525,15 +1678,28 @@ Comment=CloneBox autostart
1525
1678
 
1526
1679
  # Fix ownership of autostart directory
1527
1680
  runcmd_lines.append(" - chown -R 1000:1000 /home/ubuntu/.config/autostart")
1681
+ runcmd_lines.append(" - echo ' ✓ Autostart entries created'")
1682
+ runcmd_lines.append(" - echo ''")
1528
1683
 
1529
- # Run user-defined post commands
1684
+ # Phase 8: Post commands
1530
1685
  if config.post_commands:
1531
- runcmd_lines.append(" - echo 'Running post-setup commands...'")
1532
- for cmd in config.post_commands:
1686
+ runcmd_lines.append(f" - echo '[8/9] ⚙️ Running post-setup commands ({len(config.post_commands)} commands)...'")
1687
+ for i, cmd in enumerate(config.post_commands, 1):
1688
+ # Truncate long commands for display
1689
+ display_cmd = cmd[:60] + '...' if len(cmd) > 60 else cmd
1690
+ runcmd_lines.append(f" - echo ' → [{i}/{len(config.post_commands)}] {display_cmd}'")
1533
1691
  runcmd_lines.append(f" - {cmd}")
1692
+ runcmd_lines.append(f" - echo ' ✓ Command {i} completed'")
1693
+ runcmd_lines.append(" - echo ' ✓ Post-setup commands completed'")
1694
+ runcmd_lines.append(" - echo ''")
1695
+ else:
1696
+ runcmd_lines.append(" - echo '[8/9] ⚙️ No post-setup commands'")
1697
+ runcmd_lines.append(" - echo ''")
1534
1698
 
1535
1699
  # Generate health check script
1536
1700
  health_script = self._generate_health_check_script(config)
1701
+ # Phase 9: Health checks and finalization
1702
+ runcmd_lines.append(" - echo '[9/9] 🏥 Running health checks...'")
1537
1703
  runcmd_lines.append(
1538
1704
  f" - echo '{health_script}' | base64 -d > /usr/local/bin/clonebox-health"
1539
1705
  )
@@ -1541,8 +1707,15 @@ Comment=CloneBox autostart
1541
1707
  runcmd_lines.append(
1542
1708
  " - /usr/local/bin/clonebox-health >> /var/log/clonebox-health.log 2>&1 || true"
1543
1709
  )
1710
+ runcmd_lines.append(" - echo ' ✓ Health checks completed'")
1544
1711
  runcmd_lines.append(" - echo 'CloneBox VM ready!' > /var/log/clonebox-ready")
1545
-
1712
+
1713
+ # Final status
1714
+ runcmd_lines.append(" - echo ''")
1715
+ runcmd_lines.append(" - echo '═══════════════════════════════════════════════════════════'")
1716
+ runcmd_lines.append(" - echo ' ✅ CloneBox VM Installation Complete!'")
1717
+ runcmd_lines.append(" - echo '═══════════════════════════════════════════════════════════'")
1718
+ runcmd_lines.append(" - echo ''")
1546
1719
  # Generate boot diagnostic script (self-healing)
1547
1720
  boot_diag_script = self._generate_boot_diagnostic_script(config)
1548
1721
  runcmd_lines.append(
@@ -2273,25 +2446,32 @@ if __name__ == "__main__":
2273
2446
  # Note: The bash monitor is already installed above, no need to install Python monitor
2274
2447
 
2275
2448
  # Create logs disk for host access
2449
+ # Use different paths based on session type
2450
+ if user_session:
2451
+ logs_disk_path = str(Path.home() / ".local/share/libvirt/images/clonebox-logs.qcow2")
2452
+ else:
2453
+ logs_disk_path = "/var/lib/libvirt/images/clonebox-logs.qcow2"
2454
+
2276
2455
  runcmd_lines.extend(
2277
2456
  [
2278
2457
  " - mkdir -p /mnt/logs",
2279
- " - truncate -s 1G /var/lib/libvirt/images/clonebox-logs.qcow2",
2280
- " - mkfs.ext4 -F /var/lib/libvirt/images/clonebox-logs.qcow2",
2281
- " - echo '/var/lib/libvirt/images/clonebox-logs.qcow2 /mnt/logs ext4 loop,defaults 0 0' >> /etc/fstab",
2458
+ f" - truncate -s 1G {logs_disk_path}",
2459
+ f" - mkfs.ext4 -F {logs_disk_path}",
2460
+ f" - echo '{logs_disk_path} /mnt/logs ext4 loop,defaults 0 0' >> /etc/fstab",
2282
2461
  " - mount -a",
2283
2462
  " - mkdir -p /mnt/logs/var/log",
2284
2463
  " - mkdir -p /mnt/logs/tmp",
2285
2464
  " - cp -r /var/log/clonebox*.log /mnt/logs/var/log/ 2>/dev/null || true",
2286
2465
  " - cp -r /tmp/*-error.log /mnt/logs/tmp/ 2>/dev/null || true",
2287
- " - echo 'Logs disk mounted at /mnt/logs - accessible from host as /var/lib/libvirt/images/clonebox-logs.qcow2'",
2288
- " - \"echo 'To view logs on host: sudo mount -o loop /var/lib/libvirt/images/clonebox-logs.qcow2 /mnt/clonebox-logs'\"",
2466
+ f" - echo 'Logs disk mounted at /mnt/logs - accessible from host as {logs_disk_path}'",
2467
+ f" - \"echo 'To view logs on host: sudo mount -o loop {logs_disk_path} /mnt/clonebox-logs'\"",
2289
2468
  ]
2290
2469
  )
2291
2470
 
2292
2471
  # Add reboot command at the end if GUI is enabled
2293
2472
  if config.gui:
2294
- runcmd_lines.append(" - echo 'Rebooting in 10 seconds to start GUI...'")
2473
+ runcmd_lines.append(" - echo '🔄 Rebooting in 10 seconds to start GUI...'")
2474
+ runcmd_lines.append(" - echo ' (After reboot, GUI will auto-start)'")
2295
2475
  runcmd_lines.append(" - sleep 10 && reboot")
2296
2476
 
2297
2477
  runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
@@ -2339,9 +2519,8 @@ package_update: true
2339
2519
  package_upgrade: false
2340
2520
  {bootcmd_block}
2341
2521
 
2342
- # Install packages (cloud-init waits for completion before runcmd)
2343
- packages:
2344
- {packages_yaml}
2522
+ # Install packages moved to runcmd for better logging
2523
+ packages: []
2345
2524
 
2346
2525
  # Run after packages are installed
2347
2526
  runcmd: