clonebox 1.1.18__py3-none-any.whl → 1.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clonebox/backends/libvirt_backend.py +3 -1
- clonebox/cli.py +591 -556
- clonebox/cloner.py +465 -412
- clonebox/health/probes.py +14 -0
- clonebox/policies/__init__.py +13 -0
- clonebox/policies/engine.py +112 -0
- clonebox/policies/models.py +55 -0
- clonebox/policies/validators.py +26 -0
- clonebox/validator.py +70 -28
- {clonebox-1.1.18.dist-info → clonebox-1.1.19.dist-info}/METADATA +1 -1
- {clonebox-1.1.18.dist-info → clonebox-1.1.19.dist-info}/RECORD +15 -11
- {clonebox-1.1.18.dist-info → clonebox-1.1.19.dist-info}/WHEEL +0 -0
- {clonebox-1.1.18.dist-info → clonebox-1.1.19.dist-info}/entry_points.txt +0 -0
- {clonebox-1.1.18.dist-info → clonebox-1.1.19.dist-info}/licenses/LICENSE +0 -0
- {clonebox-1.1.18.dist-info → clonebox-1.1.19.dist-info}/top_level.txt +0 -0
clonebox/cloner.py
CHANGED
@@ -32,6 +32,7 @@ from clonebox.interfaces.disk import DiskManager
 from clonebox.interfaces.hypervisor import HypervisorBackend
 from clonebox.interfaces.network import NetworkManager
 from clonebox.logging import get_logger, log_operation
+from clonebox.policies import PolicyEngine, PolicyViolationError
 from clonebox.resources import ResourceLimits
 from clonebox.rollback import vm_creation_transaction
 from clonebox.secrets import SecretsManager, SSHKeyPair
@@ -231,6 +232,13 @@ class SelectiveVMCloner:
             url=self.DEFAULT_BASE_IMAGE_URL,
         )

+        policy = PolicyEngine.load_effective()
+        if policy is not None:
+            try:
+                policy.assert_url_allowed(self.DEFAULT_BASE_IMAGE_URL)
+            except PolicyViolationError as e:
+                raise RuntimeError(str(e)) from e
+
         try:
             import urllib.request
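For readers new to the `clonebox.policies` module added in this release, here is a minimal sketch of what an allowlist-style URL check like the one invoked above could look like. The class body below is a hypothetical stand-in; only the names `PolicyEngine.load_effective()`, `assert_url_allowed()` and `PolicyViolationError` come from the diff, the rule format and host list are assumptions.

```python
from dataclasses import dataclass, field
from urllib.parse import urlparse


class PolicyViolationError(Exception):
    """Raised when an operation is blocked by policy (name taken from the import above)."""


@dataclass
class UrlPolicy:
    # Hypothetical stand-in for the real PolicyEngine; the actual rule model
    # lives in clonebox/policies/models.py and is not shown in this diff.
    allowed_hosts: set[str] = field(default_factory=lambda: {"cloud-images.ubuntu.com"})

    def assert_url_allowed(self, url: str) -> None:
        # Reject any download whose host is not on the allowlist.
        host = urlparse(url).hostname or ""
        if host not in self.allowed_hosts:
            raise PolicyViolationError(f"download host not allowed by policy: {host}")


policy = UrlPolicy()
policy.assert_url_allowed("https://cloud-images.ubuntu.com/releases/example.img")  # example URL
```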
@@ -302,8 +310,20 @@ class SelectiveVMCloner:
             "images_dir_writable": False,
             "images_dir": str(images_dir),
             "session_type": "user" if self.user_session else "system",
+            "genisoimage_installed": False,
+            "virt_viewer_installed": False,
+            "qemu_img_installed": False,
         }

+        # Check for genisoimage
+        checks["genisoimage_installed"] = shutil.which("genisoimage") is not None
+
+        # Check for virt-viewer
+        checks["virt_viewer_installed"] = shutil.which("virt-viewer") is not None
+
+        # Check for qemu-img
+        checks["qemu_img_installed"] = shutil.which("qemu-img") is not None
+
         # Check libvirt connection
         if self.conn and self.conn.isAlive():
             checks["libvirt_connected"] = True
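The new preflight keys are plain `shutil.which()` probes for host tools. As a standalone illustration of the same pattern (not the clonebox API), independent of the `checks` dict above:

```python
import shutil

# Map each required host tool to an "is on PATH" flag, mirroring the
# genisoimage / virt-viewer / qemu-img checks added above.
required_tools = ["genisoimage", "virt-viewer", "qemu-img"]
checks = {
    f"{tool.replace('-', '_')}_installed": shutil.which(tool) is not None
    for tool in required_tools
}
print(checks)  # e.g. {'genisoimage_installed': True, 'virt_viewer_installed': False, ...}
```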
@@ -338,8 +358,9 @@ class SelectiveVMCloner:
                 checks["images_dir_error"] = (
                     f"Cannot write to {images_dir}\n"
                     f" Option 1: Run with sudo\n"
-                    f" Option 2: Use --user flag for user session (
-                    f"
+                    f" Option 2: Use --user flag for user session (recommended):\n"
+                    f" clonebox clone . --user\n\n"
+                    f" 3. Fix permissions: sudo chown -R $USER:libvirt {images_dir}"
                 )
             else:
                 # Try to create it
@@ -355,7 +376,13 @@ class SelectiveVMCloner:

         return checks

-    def create_vm(
+    def create_vm(
+        self,
+        config: VMConfig,
+        console=None,
+        replace: bool = False,
+        approved: bool = False,
+    ) -> str:
         """
         Create a VM with only selected applications/paths.

@@ -407,7 +434,19 @@ class SelectiveVMCloner:
             )

             log.info(f"VM '{config.name}' already exists - replacing...")
-
+            policy = PolicyEngine.load_effective()
+            if policy is not None:
+                policy.assert_operation_approved(
+                    AuditEventType.VM_DELETE.value,
+                    approved=approved,
+                )
+            self.delete_vm(
+                config.name,
+                delete_storage=True,
+                console=console,
+                ignore_not_found=True,
+                approved=approved,
+            )

         # Determine images directory
         images_dir = self.get_images_dir()
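Callers now have to opt in twice before an existing VM is destroyed: the pre-existing replace path plus the new `approved=True` flag that satisfies the policy gate. A hedged usage sketch follows; the keyword names come from the new `create_vm` signature, while the constructor calls and module paths are assumptions made for illustration.

```python
# Module layout and constructor arguments are assumed for this sketch.
from clonebox.cloner import SelectiveVMCloner, VMConfig
from clonebox.policies import PolicyViolationError

cloner = SelectiveVMCloner()              # hypothetical default construction
config = VMConfig(name="dev-clone")       # field name assumed for illustration

try:
    vm_uuid = cloner.create_vm(config, replace=True, approved=True)
    print(f"created VM {vm_uuid}")
except (RuntimeError, PolicyViolationError) as err:
    # URL policy violations are re-raised as RuntimeError (see the hunk at -231 above);
    # the approval gate is expected to raise from assert_operation_approved when approved=False.
    print(f"blocked: {err}")
```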
@@ -436,7 +475,7 @@ class SelectiveVMCloner:

         if config.base_image and Path(config.base_image).exists():
             # Use backing file for faster creation
-            log.
+            log.info(f"Creating root disk ({config.disk_size_gb}GB) using backing file: {config.base_image}")
             cmd = [
                 "qemu-img",
                 "create",
@@ -451,12 +490,12 @@ class SelectiveVMCloner:
             ]
         else:
             # Create empty disk
-            log.
+            log.info(f"Creating empty {config.disk_size_gb}GB root disk...")
             cmd = ["qemu-img", "create", "-f", "qcow2", str(root_disk), f"{config.disk_size_gb}G"]

         subprocess.run(cmd, check=True, capture_output=True)

-        # Create cloud-init ISO if packages/services specified
+        # Create cloud-init ISO if packages/services/paths specified
         cloudinit_iso = None
         if (
             config.packages
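The backing-file branch shells out to `qemu-img`. For reference, the overlay creation that the elided `cmd` list builds roughly corresponds to the following standard call; the paths and the 20G size here are examples, not values from the diff.

```python
import subprocess

base_image = "/var/lib/libvirt/images/base.qcow2"        # example path
root_disk = "/var/lib/libvirt/images/dev-clone.qcow2"    # example path

# qcow2 overlay backed by a read-only base image: only deltas are written to root_disk.
subprocess.run(
    [
        "qemu-img", "create",
        "-f", "qcow2",        # format of the new overlay
        "-F", "qcow2",        # format of the backing file (required by newer qemu-img)
        "-b", base_image,     # backing file
        root_disk,
        "20G",                # virtual size, analogous to config.disk_size_gb
    ],
    check=True,
    capture_output=True,
)
```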
@@ -464,9 +503,11 @@ class SelectiveVMCloner:
|
|
|
464
503
|
or config.snap_packages
|
|
465
504
|
or config.post_commands
|
|
466
505
|
or config.gui
|
|
506
|
+
or config.paths
|
|
507
|
+
or config.copy_paths
|
|
467
508
|
):
|
|
468
509
|
cloudinit_iso = ctx.add_file(self._create_cloudinit_iso(vm_dir, config, self.user_session))
|
|
469
|
-
log.info(f"Created cloud-init ISO
|
|
510
|
+
log.info(f"Created cloud-init ISO for VM setup")
|
|
470
511
|
|
|
471
512
|
# Generate VM XML
|
|
472
513
|
vm_xml = self._generate_vm_xml(config, root_disk, cloudinit_iso)
|
|
@@ -492,207 +533,11 @@ class SelectiveVMCloner:

         return vm.UUIDString()

-    def _generate_vm_xml(
-        self, config: VMConfig = None, root_disk: Path = None, cloudinit_iso: Optional[Path] = None
-    ) -> str:
-        """Generate libvirt XML for the VM."""
-
-        # Backward compatibility: if called without args, try to derive defaults
-        if config is None:
-            config = VMConfig()
-        if root_disk is None:
-            root_disk = Path("/var/lib/libvirt/images/default-disk.qcow2")
-
-        # Get resource limits from config or defaults
-        resource_data = getattr(config, "resources", {})
-        if not resource_data:
-            # Fallback to top-level fields
-            resource_data = {
-                "cpu": {"vcpus": config.vcpus},
-                "memory": {"limit": f"{config.ram_mb}M"},
-            }
-
-        limits = ResourceLimits.from_dict(resource_data)
-
-        root = ET.Element("domain", type="kvm")
-
-        # Basic metadata
-        ET.SubElement(root, "name").text = config.name
-        ET.SubElement(root, "uuid").text = str(uuid.uuid4())
-
-        # Memory configuration using limits
-        limit_kib = limits.memory.limit_bytes // 1024
-        ET.SubElement(root, "memory", unit="KiB").text = str(limit_kib)
-        ET.SubElement(root, "currentMemory", unit="KiB").text = str(limit_kib)
-
-        # CPU configuration
-        ET.SubElement(root, "vcpu", placement="static").text = str(limits.cpu.vcpus)
-
-        # OS configuration
-        os_elem = ET.SubElement(root, "os")
-        ET.SubElement(os_elem, "type", arch="x86_64", machine="q35").text = "hvm"
-        ET.SubElement(os_elem, "boot", dev="hd")
-
-        # Features
-        features = ET.SubElement(root, "features")
-        ET.SubElement(features, "acpi")
-        ET.SubElement(features, "apic")
-
-        # Resource tuning (CPU and Memory)
-        cputune_xml = limits.cpu.to_libvirt_xml()
-        if cputune_xml:
-            # We append pre-generated XML string later or use ET to parse it
-            # For simplicity with existing ET code, we'll use SubElement for basic ones
-            # and manual string insertion for complex tuning if needed,
-            # but let's try to stick to ET where possible.
-            pass
-
-        # CPU tuning element
-        # Only available in system session (requires cgroups)
-        if not self.user_session and (limits.cpu.shares or limits.cpu.quota or limits.cpu.pin):
-            cputune = ET.SubElement(root, "cputune")
-            ET.SubElement(cputune, "shares").text = str(limits.cpu.shares)
-            if limits.cpu.quota:
-                ET.SubElement(cputune, "period").text = str(limits.cpu.period)
-                ET.SubElement(cputune, "quota").text = str(limits.cpu.quota)
-            if limits.cpu.pin:
-                for idx, cpu in enumerate(limits.cpu.pin):
-                    ET.SubElement(cputune, "vcpupin", vcpu=str(idx), cpuset=str(cpu))
-
-        # Memory tuning element
-        # Only available in system session (requires cgroups)
-        if not self.user_session and (limits.memory.soft_limit or limits.memory.swap):
-            memtune = ET.SubElement(root, "memtune")
-            ET.SubElement(memtune, "hard_limit", unit="KiB").text = str(limit_kib)
-            if limits.memory.soft_limit_bytes:
-                ET.SubElement(memtune, "soft_limit", unit="KiB").text = str(limits.memory.soft_limit_bytes // 1024)
-            if limits.memory.swap_bytes:
-                ET.SubElement(memtune, "swap_hard_limit", unit="KiB").text = str(limits.memory.swap_bytes // 1024)
-
-        # CPU
-        ET.SubElement(root, "cpu", mode="host-passthrough", check="none")
-
-        # Devices
-        devices = ET.SubElement(root, "devices")
-
-        # Emulator
-        ET.SubElement(devices, "emulator").text = "/usr/bin/qemu-system-x86_64"
-
-        # Root disk
-        disk = ET.SubElement(devices, "disk", type="file", device="disk")
-        ET.SubElement(disk, "driver", name="qemu", type="qcow2", cache="writeback")
-        ET.SubElement(disk, "source", file=str(root_disk))
-        ET.SubElement(disk, "target", dev="vda", bus="virtio")
-
-        # Disk I/O tuning
-        # Only available in system session (requires cgroups)
-        if not self.user_session and (limits.disk.read_bps or limits.disk.write_bps or limits.disk.read_iops or limits.disk.write_iops):
-            iotune = ET.SubElement(disk, "iotune")
-            if limits.disk.read_bps_bytes:
-                ET.SubElement(iotune, "read_bytes_sec").text = str(limits.disk.read_bps_bytes)
-            if limits.disk.write_bps_bytes:
-                ET.SubElement(iotune, "write_bytes_sec").text = str(limits.disk.write_bps_bytes)
-            if limits.disk.read_iops:
-                ET.SubElement(iotune, "read_iops_sec").text = str(limits.disk.read_iops)
-            if limits.disk.write_iops:
-                ET.SubElement(iotune, "write_iops_sec").text = str(limits.disk.write_iops)
-
-        # Cloud-init ISO
-        if cloudinit_iso:
-            cdrom = ET.SubElement(devices, "disk", type="file", device="cdrom")
-            ET.SubElement(cdrom, "driver", name="qemu", type="raw")
-            ET.SubElement(cdrom, "source", file=str(cloudinit_iso))
-            ET.SubElement(cdrom, "target", dev="sda", bus="sata")
-            ET.SubElement(cdrom, "readonly")
-
-        # 9p filesystem mounts (bind mounts from host)
-        # Use accessmode="mapped" to allow VM user to access host files regardless of UID
-        for idx, (host_path, guest_tag) in enumerate(config.paths.items()):
-            if Path(host_path).exists():
-                fs = ET.SubElement(devices, "filesystem", type="mount", accessmode="mapped")
-                ET.SubElement(fs, "driver", type="path", wrpolicy="immediate")
-                ET.SubElement(fs, "source", dir=host_path)
-                # Use simple tag names for 9p mounts
-                tag = f"mount{idx}"
-                ET.SubElement(fs, "target", dir=tag)
-
-        # 9p filesystem mounts for COPY paths (mounted to temp location for import)
-        for idx, (host_path, guest_path) in enumerate(config.copy_paths.items()):
-            if Path(host_path).exists():
-                fs = ET.SubElement(devices, "filesystem", type="mount", accessmode="mapped")
-                ET.SubElement(fs, "driver", type="path", wrpolicy="immediate")
-                ET.SubElement(fs, "source", dir=host_path)
-                # Use import tag names for copy mounts
-                tag = f"import{idx}"
-                ET.SubElement(fs, "target", dir=tag)
-
-        # Network interface
-        network_mode = self.resolve_network_mode(config)
-        if network_mode == "user":
-            iface = ET.SubElement(devices, "interface", type="user")
-            ET.SubElement(iface, "model", type="virtio")
-        else:
-            iface = ET.SubElement(devices, "interface", type="network")
-            ET.SubElement(iface, "source", network="default")
-            ET.SubElement(iface, "model", type="virtio")
-
-        # Network bandwidth tuning
-        if limits.network.inbound or limits.network.outbound:
-            bandwidth = ET.SubElement(iface, "bandwidth")
-            if limits.network.inbound_kbps:
-                # average in KB/s
-                ET.SubElement(bandwidth, "inbound", average=str(limits.network.inbound_kbps // 8))
-            if limits.network.outbound_kbps:
-                ET.SubElement(bandwidth, "outbound", average=str(limits.network.outbound_kbps // 8))
-
-        # Serial console
-        serial = ET.SubElement(devices, "serial", type="pty")
-        ET.SubElement(serial, "target", port="0")
-
-        console_elem = ET.SubElement(devices, "console", type="pty")
-        ET.SubElement(console_elem, "target", type="serial", port="0")
-
-        # Graphics (SPICE)
-        if config.gui:
-            graphics = ET.SubElement(
-                devices, "graphics", type="spice", autoport="yes", listen="127.0.0.1"
-            )
-            ET.SubElement(graphics, "listen", type="address", address="127.0.0.1")
-
-        # Video
-        video = ET.SubElement(devices, "video")
-        ET.SubElement(video, "model", type="virtio", heads="1", primary="yes")
-
-        # Input devices
-        ET.SubElement(devices, "input", type="tablet", bus="usb")
-        ET.SubElement(devices, "input", type="keyboard", bus="usb")
-
-        ET.SubElement(devices, "controller", type="virtio-serial", index="0")
-
-        # Channel for guest agent
-        channel = ET.SubElement(devices, "channel", type="unix")
-        ET.SubElement(channel, "source", mode="bind")
-        ET.SubElement(channel, "target", type="virtio", name="org.qemu.guest_agent.0")
-
-        # Memory balloon
-        memballoon = ET.SubElement(devices, "memballoon", model="virtio")
-        ET.SubElement(
-            memballoon,
-            "address",
-            type="pci",
-            domain="0x0000",
-            bus="0x00",
-            slot="0x08",
-            function="0x0",
-        )
-
-        return ET.tostring(root, encoding="unicode")
-
     def _generate_boot_diagnostic_script(self, config: VMConfig) -> str:
         """Generate boot diagnostic script with self-healing capabilities."""
         import base64

-
+        wants_chrome = any(
             p == "/home/ubuntu/.config/google-chrome"
             for p in list((config.paths or {}).values()) + list((config.copy_paths or {}).values())
         )
@@ -773,7 +618,7 @@ check_snap() {{
 }}

 install_snap() {{
-    timeout
+    timeout 120 snap wait system seed.loaded 2>/dev/null || true
     for i in $(seq 1 $MAX_RETRIES); do
         snap install "$1" --classic &>>"$LOG" && return 0
         snap install "$1" &>>"$LOG" && return 0
@@ -967,7 +812,7 @@ for pkg in "${{SNAP_PACKAGES[@]}}"; do
     [ -z "$pkg" ] && continue
     APPS_TO_TEST+=("$pkg")
 done
-if [ "{str(
+if [ "{str(wants_chrome).lower()}" = "true" ]; then
     APPS_TO_TEST+=("google-chrome")
 fi
 if printf '%s\n' "${{APT_PACKAGES[@]}}" | grep -qx "docker.io"; then
@@ -1108,26 +953,28 @@ fi

         # Build package check commands
         apt_checks = []
-        for pkg in config.packages:
-            apt_checks.append(f'check_apt_package "{pkg}"')
+        for i, pkg in enumerate(config.packages, 1):
+            apt_checks.append(f'check_apt_package "{pkg}" "{i}/{len(config.packages)}"')

         snap_checks = []
-        for pkg in config.snap_packages:
-            snap_checks.append(f'check_snap_package "{pkg}"')
+        for i, pkg in enumerate(config.snap_packages, 1):
+            snap_checks.append(f'check_snap_package "{pkg}" "{i}/{len(config.snap_packages)}"')

         service_checks = []
-        for svc in config.services:
-            service_checks.append(f'check_service "{svc}"')
+        for i, svc in enumerate(config.services, 1):
+            service_checks.append(f'check_service "{svc}" "{i}/{len(config.services)}"')

         mount_checks = []
-
-
+        bind_paths = list(config.paths.items())
+        for i, (host_path, guest_tag) in enumerate(bind_paths, 1):
+            mount_checks.append(f'check_mount "{guest_tag}" "mount{i-1}" "{i}/{len(bind_paths)}"')

         # Add copied paths checks
         copy_paths = config.copy_paths or getattr(config, "app_data_paths", {})
         if copy_paths:
-
-
+            copy_list = list(copy_paths.items())
+            for i, (host_path, guest_path) in enumerate(copy_list, 1):
+                mount_checks.append(f'check_copy_path "{guest_path}" "{i}/{len(copy_list)}"')

         apt_checks_str = "\n".join(apt_checks) if apt_checks else "echo 'No apt packages to check'"
         snap_checks_str = (
@@ -1138,7 +985,7 @@ fi
         )
         mount_checks_str = "\n".join(mount_checks) if mount_checks else "echo 'No mounts to check'"

-        script =
+        script = fr"""#!/bin/bash
 # CloneBox Health Check Script
 # Generated automatically - validates all installed components

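Because the health-check script is now built with an fr-string, every literal `{` or `}` in the embedded bash (function bodies, `${VAR}` expansions) has to be doubled to `{{`/`}}`, while single braces remain Python interpolation points — which is why so many `{{`/`}}` edits appear in the hunks below. A tiny self-contained illustration of the convention (not clonebox code):

```python
pkg_count = 3  # value interpolated by Python

# Doubled braces survive as literal braces in the generated bash; single braces interpolate.
script = fr"""#!/bin/bash
check() {{
    echo "checking ${{1}} of {pkg_count} packages"
}}
check 1
"""
print(script)
```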
@@ -1159,6 +1006,10 @@ NC='\\033[0m'

 log() {{
     echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$REPORT_FILE"
+    # If it's a PASS/FAIL/INFO/WARN line, also echo with prefix for the monitor
+    if [[ "$1" =~ ^\[(PASS|FAIL|WARN|INFO)\] ]]; then
+        echo " → $1"
+    fi
 }}

 check_disk_space() {{
@@ -1184,17 +1035,18 @@ check_disk_space() {{

 check_apt_package() {{
     local pkg="$1"
+    local progress="$2"
     if dpkg -l "$pkg" 2>/dev/null | grep -q "^ii"; then
-        log "[PASS] APT package '$pkg' is installed"
+        log "[PASS] [$progress] APT package '$pkg' is installed"
         ((PASSED++))
         return 0
     else
         if [ $SETUP_IN_PROGRESS -eq 1 ]; then
-            log "[WARN] APT package '$pkg' is not installed yet"
+            log "[WARN] [$progress] APT package '$pkg' is not installed yet"
             ((WARNINGS++))
             return 1
         else
-            log "[FAIL] APT package '$pkg' is NOT installed"
+            log "[FAIL] [$progress] APT package '$pkg' is NOT installed"
             ((FAILED++))
             return 1
         fi
@@ -1203,19 +1055,20 @@ check_apt_package() {{

 check_snap_package() {{
     local pkg="$1"
+    local progress="$2"
     local out
     out=$(snap list "$pkg" 2>&1)
     if [ $? -eq 0 ]; then
-        log "[PASS] Snap package '$pkg' is installed"
+        log "[PASS] [$progress] Snap package '$pkg' is installed"
         ((PASSED++))
         return 0
     else
         if [ $SETUP_IN_PROGRESS -eq 1 ]; then
-            log "[WARN] Snap package '$pkg' is not installed yet"
+            log "[WARN] [$progress] Snap package '$pkg' is not installed yet"
             ((WARNINGS++))
             return 1
         else
-            log "[FAIL] Snap package '$pkg' is NOT installed"
+            log "[FAIL] [$progress] Snap package '$pkg' is NOT installed"
             ((FAILED++))
             return 1
         fi
@@ -1224,18 +1077,19 @@ check_snap_package() {{

 check_service() {{
     local svc="$1"
+    local progress="$2"
     if systemctl is-enabled "$svc" &>/dev/null; then
         if systemctl is-active "$svc" &>/dev/null; then
-            log "[PASS] Service '$svc' is enabled and running"
+            log "[PASS] [$progress] Service '$svc' is enabled and running"
             ((PASSED++))
             return 0
         else
-            log "[WARN] Service '$svc' is enabled but not running"
+            log "[WARN] [$progress] Service '$svc' is enabled but not running"
             ((WARNINGS++))
             return 1
         fi
     else
-        log "[INFO] Service '$svc' is not enabled (may be optional)"
+        log "[INFO] [$progress] Service '$svc' is not enabled (may be optional)"
         return 0
     fi
 }}
@@ -1243,38 +1097,40 @@ check_service() {{
 check_mount() {{
     local path="$1"
     local tag="$2"
+    local progress="$3"
     if mountpoint -q "$path" 2>/dev/null; then
-        log "[PASS] Mount '$path' ($tag) is active"
+        log "[PASS] [$progress] Mount '$path' ($tag) is active"
         ((PASSED++))
         return 0
     elif [ -d "$path" ]; then
-        log "[WARN] Directory '$path' exists but not mounted"
+        log "[WARN] [$progress] Directory '$path' exists but not mounted"
         ((WARNINGS++))
         return 1
     else
-        log "[INFO] Mount point '$path' does not exist yet"
+        log "[INFO] [$progress] Mount point '$path' does not exist yet"
         return 0
     fi
 }}

 check_copy_path() {{
     local path="$1"
+    local progress="$2"
     if [ -d "$path" ]; then
         if [ "$(ls -A "$path" 2>/dev/null | wc -l)" -gt 0 ]; then
-            log "[PASS] Path '$path' exists and contains data"
+            log "[PASS] [$progress] Path '$path' exists and contains data"
             ((PASSED++))
             return 0
         else
-            log "[WARN] Path '$path' exists but is EMPTY"
+            log "[WARN] [$progress] Path '$path' exists but is EMPTY"
             ((WARNINGS++))
             return 1
         fi
     else
         if [ $SETUP_IN_PROGRESS -eq 1 ]; then
-            log "[INFO] Path '$path' not imported yet"
+            log "[INFO] [$progress] Path '$path' not imported yet"
             return 0
         else
-            log "[FAIL] Path '$path' MISSING"
+            log "[FAIL] [$progress] Path '$path' MISSING"
             ((FAILED++))
             return 1
         fi
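The generated health script is base64-encoded on the host (see `encoded = base64.b64encode(...)` in the next hunk) and later materialized in the guest through an `echo ... | base64 -d` runcmd entry. A minimal sketch of that round trip, independent of clonebox:

```python
import base64

script = "#!/bin/bash\necho 'hello from the guest'\n"

# Host side: encode so the whole script travels as one safe token inside the YAML user-data.
encoded = base64.b64encode(script.encode()).decode()

# Guest side: the runcmd entry cloud-init executes to write the file back out.
runcmd_line = f" - echo '{encoded}' | base64 -d > /usr/local/bin/clonebox-health"
print(runcmd_line)
```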
@@ -1363,10 +1219,213 @@ fi
         encoded = base64.b64encode(script.encode()).decode()
         return encoded

+    def _generate_vm_xml(
+        self, config: VMConfig = None, root_disk: Path = None, cloudinit_iso: Optional[Path] = None
+    ) -> str:
+        """Generate libvirt XML for the VM."""
+        [... new lines 1226-1416: the method body is re-added here unchanged; it is identical to the
+         _generate_vm_xml block shown as removed lines in the "@@ -492,207 +533,11 @@" hunk above ...]
+        return ET.tostring(root, encoding="unicode")
+
     def _create_cloudinit_iso(self, vm_dir: Path, config: VMConfig, user_session: bool = False) -> Path:
         """Create cloud-init ISO with secure credential handling."""
         secrets_mgr = SecretsManager()

+        # Determine if Chrome is wanted early to avoid NameError in helpers
+        wants_chrome = any(
+            p == "/home/ubuntu/.config/google-chrome"
+            for p in list((config.paths or {}).values())
+            + list((config.copy_paths or {}).values())
+        )
+
         # Determine authentication method
         auth_method = getattr(config, "auth_method", "ssh_key")

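The XML string returned by `_generate_vm_xml` is ultimately fed to libvirt. A hedged sketch of that consumption using the libvirt-python bindings; the connection URI, object names, and error handling are illustrative and not taken from this diff (clonebox routes this through its hypervisor backend).

```python
import libvirt  # provided by the libvirt-python package

# `cloner`, `config`, `root_disk`, and `cloudinit_iso` are assumed from earlier sketches.
xml = cloner._generate_vm_xml(config, root_disk, cloudinit_iso)

conn = libvirt.open("qemu:///session")   # or qemu:///system when not using a user session
try:
    domain = conn.defineXML(xml)         # persist the domain definition
    domain.create()                      # boot the VM
finally:
    conn.close()
```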
@@ -1432,10 +1491,10 @@ fi
         bind_mount_commands = []
         fstab_entries = []
         all_paths = dict(config.paths) if config.paths else {}
+        existing_bind_paths = {h: g for h, g in all_paths.items() if Path(h).exists()}
         pre_chown_dirs: set[str] = set()
-        for idx, (host_path, guest_path) in enumerate(
-
-            # Ensure all parent directories in /home/ubuntu are owned by user
+        for idx, (host_path, guest_path) in enumerate(existing_bind_paths.items()):
+            # Ensure all parent directories in /home/ubuntu are owned by user
             # This prevents "Permission denied" when creating config dirs (e.g. .config) as root
             if str(guest_path).startswith("/home/ubuntu/"):
                 try:
@@ -1481,7 +1540,7 @@ fi
                 import_mount_commands.append(f" - mkdir -p {temp_mount_point}")

                 # 2. Mount the 9p share
-                import_mount_commands.append(f" - mount -t 9p -o {mount_opts} {tag} {temp_mount_point} ||
+                import_mount_commands.append(f" - mount -t 9p -o {mount_opts} {tag} {temp_mount_point} || echo ' → ❌ Failed to mount temporary share {tag}'")

                 # 3. Ensure target directory exists and permissions are prepared
                 if str(guest_path).startswith("/home/ubuntu/"):
@@ -1492,8 +1551,8 @@ fi

                 # 4. Copy contents (cp -rT to copy contents of source to target)
                 # We use || true to ensure boot continues even if copy fails
-                import_mount_commands.append(f" - echo 'Importing {host_path} to {guest_path}...'")
-                import_mount_commands.append(f" - cp -rT {temp_mount_point} {guest_path} ||
+                import_mount_commands.append(f" - echo ' → Importing {host_path} to {guest_path}...'")
+                import_mount_commands.append(f" - cp -rT {temp_mount_point} {guest_path} || echo ' → ❌ Failed to copy data to {guest_path}'")

                 # 5. Fix ownership recursively
                 import_mount_commands.append(f" - chown -R 1000:1000 {guest_path}")
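The copy-path import works by mounting the host directory over 9p into a temporary mount point inside the guest and then copying it into place. The generated guest-side commands amount to something like the sketch below; the paths are examples, and the mount options shown are the typical virtio-9p set (the diff's actual `mount_opts` value is elided).

```python
# Sketch of the guest commands that import one copy-path; all values are illustrative.
tag = "import0"                            # 9p target tag assigned in _generate_vm_xml
temp_mount_point = "/mnt/clonebox-import0" # hypothetical temporary mount point
guest_path = "/home/ubuntu/.config/google-chrome"

guest_cmds = [
    f"mkdir -p {temp_mount_point}",
    # trans=virtio,version=9p2000.L is the usual option set for libvirt 9p shares (assumption here)
    f"mount -t 9p -o trans=virtio,version=9p2000.L {tag} {temp_mount_point}",
    f"mkdir -p {guest_path}",
    f"cp -rT {temp_mount_point} {guest_path}",   # -T copies the contents of source into target
    f"chown -R 1000:1000 {guest_path}",
]
print("\n".join(guest_cmds))
```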
@@ -1519,55 +1578,80 @@ fi
         # Build runcmd - services, mounts, snaps, post_commands
         runcmd_lines = []

+        # wants_chrome moved to top of method to avoid NameError
+
         # Add detailed logging header
         runcmd_lines.append(" - echo '═══════════════════════════════════════════════════════════'")
         runcmd_lines.append(" - echo ' CloneBox VM Installation Progress'")
         runcmd_lines.append(" - echo '═══════════════════════════════════════════════════════════'")
         runcmd_lines.append(" - echo ''")
+
+        # Phase 1: System Optimization (Pre-install)
+        runcmd_lines.append(" - echo '[1/10] 🛠️ Optimizing system resources...'")
+        runcmd_lines.append(" - echo ' → Limiting journal size to 50M'")
+        runcmd_lines.append(" - sed -i 's/^#SystemMaxUse=/SystemMaxUse=50M/' /etc/systemd/journald.conf || true")
+        runcmd_lines.append(" - systemctl restart systemd-journald || true")
+        runcmd_lines.append(" - echo ' → ✓ [1/10] System resources optimized'")
+        runcmd_lines.append(" - echo ''")

-        # Phase
+        # Phase 2: APT Packages
         if all_packages:
-            runcmd_lines.append(f" - echo '[
+            runcmd_lines.append(f" - echo '[2/10] 📦 Installing APT packages ({len(all_packages)} total)...'")
             runcmd_lines.append(" - export DEBIAN_FRONTEND=noninteractive")
+            # Check space before starting
+            runcmd_lines.append(" - if [ $(df / --output=avail | tail -n 1) -lt 524288 ]; then echo ' → ⚠️ WARNING: Low disk space (<512MB) before APT install'; fi")
+            runcmd_lines.append(" - echo ' → Updating package repositories...'")
             runcmd_lines.append(" - apt-get update")
             for i, pkg in enumerate(all_packages, 1):
                 runcmd_lines.append(f" - echo ' → [{i}/{len(all_packages)}] Installing {pkg}...'")
-                runcmd_lines.append(f" - apt-get install -y {pkg} || echo '
+                runcmd_lines.append(f" - apt-get install -y {pkg} || echo ' → ❌ Failed to install {pkg}'")
+                if pkg == "qemu-guest-agent":
+                    runcmd_lines.append(
+                        " - systemctl enable --now qemu-guest-agent || echo ' → ❌ Failed to enable qemu-guest-agent'"
+                    )
             runcmd_lines.append(" - apt-get clean")
-            runcmd_lines.append(" - echo ' ✓ APT packages installed
+            runcmd_lines.append(" - echo ' → ✓ [2/10] APT packages installed'")
+            runcmd_lines.append(" - df -h / | sed 's/^/ → /'")
             runcmd_lines.append(" - echo ''")
         else:
-            runcmd_lines.append(" - echo '[
+            runcmd_lines.append(" - echo '[2/10] 📦 No APT packages to install'")
             runcmd_lines.append(" - echo ''")

-        # Phase
-        runcmd_lines.append(" - echo '[
-        runcmd_lines.append(" - echo ' → qemu-guest-agent'")
-        runcmd_lines.append(" - systemctl enable --now qemu-guest-agent ||
-        runcmd_lines.append(" - echo ' → snapd'")
-        runcmd_lines.append(" - systemctl enable --now snapd ||
+        # Phase 3: Core services
+        runcmd_lines.append(" - echo '[3/10] 🔧 Enabling core services...'")
+        runcmd_lines.append(" - echo ' → [1/2] Enabling qemu-guest-agent'")
+        runcmd_lines.append(" - systemctl enable --now qemu-guest-agent || echo ' → ❌ Failed to enable qemu-guest-agent'")
+        runcmd_lines.append(" - echo ' → [2/2] Enabling snapd'")
+        runcmd_lines.append(" - systemctl enable --now snapd || echo ' → ❌ Failed to enable snapd'")
         runcmd_lines.append(" - echo ' → Waiting for snap system seed...'")
         runcmd_lines.append(" - timeout 300 snap wait system seed.loaded || true")
-        runcmd_lines.append(" - echo ' ✓ Core services enabled'")
+        runcmd_lines.append(" - echo ' → ✓ [3/10] Core services enabled'")
         runcmd_lines.append(" - echo ''")

-        # Phase
-        runcmd_lines.append(f" - echo '[
+        # Phase 4: User services
+        runcmd_lines.append(f" - echo '[4/10] 🔧 Enabling user services ({len(config.services)} total)...'")
         for i, svc in enumerate(config.services, 1):
             runcmd_lines.append(f" - echo ' → [{i}/{len(config.services)}] {svc}'")
-            runcmd_lines.append(f" - systemctl enable --now {svc} ||
-        runcmd_lines.append(" - echo ' ✓ User services enabled'")
+            runcmd_lines.append(f" - systemctl enable --now {svc} || echo ' → ❌ Failed to enable {svc}'")
+        runcmd_lines.append(" - echo ' → ✓ [4/10] User services enabled'")
         runcmd_lines.append(" - echo ''")

-        # Phase
-        runcmd_lines.append(f" - echo '[
+        # Phase 5: Filesystem mounts
+        runcmd_lines.append(f" - echo '[5/10] 📁 Mounting shared directories ({len(existing_bind_paths)} mounts)...'")
         if bind_mount_commands:
+            mount_idx = 0
             for cmd in bind_mount_commands:
                 if "mount -t 9p" in cmd:
+                    mount_idx += 1
                     # Extract mount point for logging
-                    parts = cmd.split()
-
-
+                    parts = cmd.strip().split()
+                    # Look for the path before '||'
+                    try:
+                        sep_idx = parts.index("||")
+                        mp = parts[sep_idx - 1]
+                    except ValueError:
+                        mp = parts[-1]
+                    runcmd_lines.append(f" - echo ' → [{mount_idx}/{len(existing_bind_paths)}] Mounting {mp}...'")
                 runcmd_lines.append(cmd)

             if fstab_entries:
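All of these `runcmd_lines` entries end up under the `runcmd:` key of the cloud-init user-data; each string is already formatted as a YAML list item. A stripped-down sketch of how such a list becomes user-data (the join-based assembly is inferred from the " - ..." prefixes in the strings above, not shown in this diff):

```python
runcmd_lines = [
    " - echo '[1/10] 🛠️ Optimizing system resources...'",
    " - apt-get update",
    " - apt-get install -y qemu-guest-agent || echo ' → ❌ Failed to install qemu-guest-agent'",
]

# Pre-formatted ' - ...' strings can be joined straight into the YAML document,
# so no YAML library is needed for this part.
user_data = "#cloud-config\nruncmd:\n" + "\n".join(runcmd_lines) + "\n"
print(user_data)
```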
@@ -1578,163 +1662,110 @@ fi
                 runcmd_lines.append(
                     f" - grep -qF \"{entry}\" /etc/fstab || echo '{entry}' >> /etc/fstab"
                 )
-            runcmd_lines.append(" - mount -a ||
-            runcmd_lines.append(" - echo ' ✓ Mounts configured'")
+            runcmd_lines.append(" - mount -a || echo ' → ❌ Failed to mount shared directories'")
+            runcmd_lines.append(" - echo ' → ✓ [5/10] Mounts configured'")
         runcmd_lines.append(" - echo ''")

-        # Phase
+        # Phase 6: Data Import (copied paths)
         if existing_copy_paths:
-            runcmd_lines.append(f" - echo '[
+            runcmd_lines.append(f" - echo '[6/10] 📥 Importing data ({len(existing_copy_paths)} paths)...'")
+            # Check space before starting large import
+            runcmd_lines.append(" - if [ $(df / --output=avail | tail -n 1) -lt 1048576 ]; then echo ' → ⚠️ WARNING: Low disk space (<1GB) before data import'; fi")
             # Add import commands with progress
             import_count = 0
             for cmd in import_mount_commands:
                 if "Importing" in cmd:
                     import_count += 1
-
+                    # Replace the placeholder 'Importing' with numbered progress, ensuring no double prefix
+                    msg = cmd.replace(" - echo ' → Importing", f" - echo ' → [{import_count}/{len(existing_copy_paths)}] Importing")
+                    runcmd_lines.append(msg)
                 else:
                     runcmd_lines.append(cmd)
-            runcmd_lines.append(" - echo ' ✓ Data import completed'")
+            runcmd_lines.append(" - echo ' → ✓ [6/10] Data import completed'")
+            runcmd_lines.append(" - df -h / | sed 's/^/ → /'")
             runcmd_lines.append(" - echo ''")
         else:
-            runcmd_lines.append(" - echo '[
+            runcmd_lines.append(" - echo '[6/10] 📥 No data to import'")
             runcmd_lines.append(" - echo ''")

-        # Phase
+        # Phase 7: GUI Environment Setup
         if config.gui:
-            runcmd_lines.append(" - echo '[
+            runcmd_lines.append(" - echo '[7/10] 🖥️ Setting up GUI environment...'")
             runcmd_lines.append(" - echo ' → Creating user directories'")
             # Create directories that GNOME services need
+            gui_dirs = [
+                f"/home/{config.username}/.config/pulse",
+                f"/home/{config.username}/.cache/ibus",
+                f"/home/{config.username}/.local/share",
+                f"/home/{config.username}/.config/dconf",
+                f"/home/{config.username}/.cache/tracker3",
+                f"/home/{config.username}/.config/autostart",
+            ]
+            for i, d in enumerate(gui_dirs, 1):
+                runcmd_lines.append(f" - mkdir -p {d} && echo ' → [{i}/{len(gui_dirs)}] Created {d}'")
+
             runcmd_lines.extend(
                 [
-                    " -
-                    " -
-                    " - mkdir -p /home/ubuntu/.config/autostart",
-                    " - chown -R 1000:1000 /home/ubuntu/.config /home/ubuntu/.cache /home/ubuntu/.local",
-                    " - chmod 700 /home/ubuntu/.config /home/ubuntu/.cache",
+                    f" - chown -R 1000:1000 /home/{config.username}/.config /home/{config.username}/.cache /home/{config.username}/.local",
+                    f" - chmod 700 /home/{config.username}/.config /home/{config.username}/.cache",
                     " - systemctl set-default graphical.target",
                     " - echo ' → Starting display manager'",
                 ]
             )
             runcmd_lines.append(" - systemctl enable --now gdm3 || systemctl enable --now gdm || true")
             runcmd_lines.append(" - systemctl start display-manager || true")
-            runcmd_lines.append(" - echo ' ✓ GUI environment ready'")
+            runcmd_lines.append(" - echo ' → ✓ [7/10] GUI environment ready'")
             runcmd_lines.append(" - echo ''")
         else:
-            runcmd_lines.append(" - echo '[
+            runcmd_lines.append(" - echo '[7/10] 🖥️ No GUI requested'")
             runcmd_lines.append(" - echo ''")

-        runcmd_lines.append(" - chown -R 1000:1000 /home/
-        runcmd_lines.append(" - chown -R 1000:1000 /home/
+        runcmd_lines.append(f" - chown -R 1000:1000 /home/{config.username} || true")
+        runcmd_lines.append(f" - chown -R 1000:1000 /home/{config.username}/snap || true")

-        # Phase
+        # Phase 8: Snap packages
         if config.snap_packages:
-            runcmd_lines.append(f" - echo '[
+            runcmd_lines.append(f" - echo '[8/10] 📦 Installing snap packages ({len(config.snap_packages)} packages)...'")
+            # Check space before starting snap installation
+            runcmd_lines.append(" - if [ $(df / --output=avail | tail -n 1) -lt 2097152 ]; then echo ' → ⚠️ WARNING: Low disk space (<2GB) before Snap install'; fi")
            for i, snap_pkg in enumerate(config.snap_packages, 1):
                 runcmd_lines.append(f" - echo ' → [{i}/{len(config.snap_packages)}] {snap_pkg}'")
                 # Try classic first, then strict, with retries
                 cmd = (
                     f"for i in 1 2 3; do "
-                    f"snap install {snap_pkg} --classic && echo '
-                    f"snap install {snap_pkg} && echo '
-                    f"echo '
+                    f"snap install {snap_pkg} --classic && echo ' → ✓ {snap_pkg} installed (classic)' && break || "
+                    f"snap install {snap_pkg} && echo ' → ✓ {snap_pkg} installed' && break || "
+                    f"{{ if [ $i -eq 3 ]; then echo ' → ❌ Failed to install {snap_pkg} after 3 attempts'; else echo ' → ⟳ Retry $i/3...' && sleep 10; fi; }} "
                     f"done"
                 )
                 runcmd_lines.append(f" - {cmd}")
-            runcmd_lines.append(" - echo ' ✓ Snap packages installed'")
-            runcmd_lines.append(" -
-
-
-            runcmd_lines.append(f" - echo ' 🔌 Connecting snap interfaces...'")
-            for snap_pkg in config.snap_packages:
-                runcmd_lines.append(f" - echo ' → {snap_pkg}'")
-                interfaces = SNAP_INTERFACES.get(snap_pkg, DEFAULT_SNAP_INTERFACES)
-                for iface in interfaces:
-                    runcmd_lines.append(
-                        f" - snap connect {snap_pkg}:{iface} :{iface} 2>/dev/null || true"
-                    )
-            runcmd_lines.append(" - echo ' ✓ Snap interfaces connected'")
-            runcmd_lines.append(" - systemctl restart snapd || true")
-            runcmd_lines.append(" - echo ''")
-        else:
-            runcmd_lines.append(" - echo '[7/9] 📦 No snap packages to install'")
-            runcmd_lines.append(" - echo ''")
-
-        # Add remaining GUI setup if enabled
-        if config.gui:
-            runcmd_lines.append(" - echo ' ⚙️ Creating autostart entries...'")
-            # Create autostart entries for GUI apps
-            autostart_apps = {
-                "pycharm-community": (
-                    "PyCharm Community",
-                    "/snap/bin/pycharm-community",
-                    "pycharm-community",
-                ),
-                "firefox": ("Firefox", "/snap/bin/firefox", "firefox"),
-                "chromium": ("Chromium", "/snap/bin/chromium", "chromium"),
-                "google-chrome": ("Google Chrome", "google-chrome-stable", "google-chrome"),
-            }
-
-            for snap_pkg in config.snap_packages:
-                if snap_pkg in autostart_apps:
-                    name, exec_cmd, icon = autostart_apps[snap_pkg]
-                    desktop_entry = f"""[Desktop Entry]
-Type=Application
-Name={name}
-Exec={exec_cmd}
-Icon={icon}
-X-GNOME-Autostart-enabled=true
-X-GNOME-Autostart-Delay=5
-Comment=CloneBox autostart
-"""
-                    import base64
-
-                    desktop_b64 = base64.b64encode(desktop_entry.encode()).decode()
-                    runcmd_lines.append(
-                        f" - echo '{desktop_b64}' | base64 -d > /home/ubuntu/.config/autostart/{snap_pkg}.desktop"
-                    )
-
-            # Check if google-chrome is in paths (app_data_paths)
-            wants_chrome = any("/google-chrome" in str(p) for p in (config.paths or {}).values())
-            if wants_chrome:
-                name, exec_cmd, icon = autostart_apps["google-chrome"]
-                desktop_entry = f"""[Desktop Entry]
-Type=Application
-Name={name}
-Exec={exec_cmd}
-Icon={icon}
-X-GNOME-Autostart-enabled=true
-X-GNOME-Autostart-Delay=5
-Comment=CloneBox autostart
-"""
-                desktop_b64 = base64.b64encode(desktop_entry.encode()).decode()
-                runcmd_lines.append(
-                    f" - echo '{desktop_b64}' | base64 -d > /home/ubuntu/.config/autostart/google-chrome.desktop"
-                )
-
-            # Fix ownership of autostart directory
-            runcmd_lines.append(" - chown -R 1000:1000 /home/ubuntu/.config/autostart")
-            runcmd_lines.append(" - echo ' ✓ Autostart entries created'")
-            runcmd_lines.append(" - echo ''")
-
-        # Phase 8: Post commands
+            runcmd_lines.append(" - echo ' → ✓ [8/10] Snap packages installed'")
+            runcmd_lines.append(" - df -h / | sed 's/^/ → /'")
+
+        # Phase 9: Post commands
         if config.post_commands:
-            runcmd_lines.append(f" - echo '[
+            runcmd_lines.append(f" - echo '[9/10] ⚙️ Running post-setup commands ({len(config.post_commands)} total)...'")
             for i, cmd in enumerate(config.post_commands, 1):
                 # Truncate long commands for display
                 display_cmd = cmd[:60] + '...' if len(cmd) > 60 else cmd
                 runcmd_lines.append(f" - echo ' → [{i}/{len(config.post_commands)}] {display_cmd}'")
-                runcmd_lines.append(f" - {cmd}")
-                runcmd_lines.append(f" - echo '
-            runcmd_lines.append(" - echo ' ✓ Post-setup commands
+                runcmd_lines.append(f" - {cmd} || echo ' → ❌ Command {i} failed'")
+                runcmd_lines.append(f" - echo ' → ✓ Command {i} completed'")
+            runcmd_lines.append(" - echo ' → ✓ [9/10] Post-setup commands finished'")
             runcmd_lines.append(" - echo ''")
         else:
-            runcmd_lines.append(" - echo '[
+            runcmd_lines.append(" - echo '[9/10] ⚙️ No post-setup commands'")
             runcmd_lines.append(" - echo ''")

         # Generate health check script
         health_script = self._generate_health_check_script(config)
-        # Phase
-        runcmd_lines.append(" - echo '[
+        # Phase 10: Health checks and finalization
+        runcmd_lines.append(" - echo '[10/10] 🏥 Running health checks and final cleanup...'")
+        runcmd_lines.append(" - echo ' → Vacuuming system logs'")
+        runcmd_lines.append(" - journalctl --vacuum-size=50M >/dev/null 2>&1 || true")
+        runcmd_lines.append(" - echo ' → Checking final disk usage'")
+        runcmd_lines.append(" - df -h / | sed 's/^/ → /'")
+
         runcmd_lines.append(
             f" - echo '{health_script}' | base64 -d > /usr/local/bin/clonebox-health"
         )
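The snap install retry above packs a three-attempt classic-then-strict fallback into a single shell line. Expanded into plain Python string building (mirroring the f-strings in the hunk; the package name is an example), the generated command looks like this — note that `{{`/`}}` inside an f-string render as literal braces for the shell group:

```python
snap_pkg = "pycharm-community"  # example package

cmd = (
    f"for i in 1 2 3; do "
    f"snap install {snap_pkg} --classic && echo ' → ✓ {snap_pkg} installed (classic)' && break || "
    f"snap install {snap_pkg} && echo ' → ✓ {snap_pkg} installed' && break || "
    f"{{ if [ $i -eq 3 ]; then echo ' → ❌ Failed to install {snap_pkg} after 3 attempts'; "
    f"else echo ' → ⟳ Retry $i/3...' && sleep 10; fi; }} "
    f"done"
)
print(cmd)  # one-line bash loop ready to drop into runcmd
```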
@@ -1742,7 +1773,7 @@ Comment=CloneBox autostart
         runcmd_lines.append(
             " - /usr/local/bin/clonebox-health >> /var/log/clonebox-health.log 2>&1 || true"
         )
-        runcmd_lines.append(" - echo ' ✓ Health checks completed'")
+        runcmd_lines.append(" - echo ' → ✓ [10/10] Health checks completed'")
         runcmd_lines.append(" - echo 'CloneBox VM ready!' > /var/log/clonebox-ready")

         # Final status
@@ -1818,7 +1849,7 @@ set -uo pipefail

 RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' CYAN='\033[0;36m' NC='\033[0m' BOLD='\033[1m'

-show_help() {
+show_help() {{
     echo -e "${BOLD}${CYAN}CloneBox Repair Utility${NC}"
     echo ""
     echo "Usage: clonebox-repair [OPTION]"
@@ -1836,9 +1867,10 @@ show_help() {
     echo " --help Show this help message"
     echo ""
     echo "Without options, shows interactive menu."
-}
+}}

-show_status() {
+show_status() {{
+    echo ""
     echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
     echo -e "${BOLD}${CYAN} CloneBox VM Status${NC}"
     echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
@@ -1857,17 +1889,17 @@ show_status() {
     echo ""
     echo -e " Last boot diagnostic: $(stat -c %y /var/log/clonebox-boot.log 2>/dev/null || echo 'never')"
     echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
-}
+}}

-show_logs() {
+show_logs() {{
     echo -e "${BOLD}Recent repair logs:${NC}"
     echo ""
     tail -n 50 /var/log/clonebox-boot.log 2>/dev/null || echo "No logs found"
-}
+}}

-fix_permissions() {
+fix_permissions() {{
     echo -e "${CYAN}Fixing directory permissions...${NC}"
-    VM_USER="${SUDO_USER:-ubuntu}"
+    VM_USER="${{SUDO_USER:-ubuntu}}"
     VM_HOME="/home/$VM_USER"

     DIRS_TO_CREATE=(
@@ -1885,7 +1917,7 @@ fix_permissions() {
|
|
|
1885
1917
|
"$VM_HOME/.local/share/keyrings"
|
|
1886
1918
|
)
|
|
1887
1919
|
|
|
1888
|
-
for dir in "${DIRS_TO_CREATE[@]}"; do
|
|
1920
|
+
for dir in "${{DIRS_TO_CREATE[@]}}"; do
|
|
1889
1921
|
if [ ! -d "$dir" ]; then
|
|
1890
1922
|
mkdir -p "$dir" 2>/dev/null && echo " Created $dir"
|
|
1891
1923
|
fi
|
|
@@ -1899,11 +1931,11 @@ fix_permissions() {
|
|
|
1899
1931
|
done
|
|
1900
1932
|
|
|
1901
1933
|
echo -e "${GREEN}✅ Permissions fixed${NC}"
|
|
1902
|
-
}
|
|
1934
|
+
}}
|
|
1903
1935
|
|
|
1904
|
-
fix_audio() {
|
|
1936
|
+
fix_audio() {{
|
|
1905
1937
|
echo -e "${CYAN}Fixing audio (PulseAudio/PipeWire)...${NC}"
|
|
1906
|
-
VM_USER="${SUDO_USER:-ubuntu}"
|
|
1938
|
+
VM_USER="${{SUDO_USER:-ubuntu}}"
|
|
1907
1939
|
VM_HOME="/home/$VM_USER"
|
|
1908
1940
|
|
|
1909
1941
|
# Create pulse config directory with correct permissions
|
|
@@ -1928,11 +1960,11 @@ fix_audio() {
|
|
|
1928
1960
|
systemctl --user restart pipewire pipewire-pulse 2>/dev/null || true
|
|
1929
1961
|
|
|
1930
1962
|
echo -e "${GREEN}✅ Audio fixed${NC}"
|
|
1931
|
-
}
|
|
1963
|
+
}}
|
|
1932
1964
|
|
|
1933
|
-
fix_keyring() {
|
|
1965
|
+
fix_keyring() {{
|
|
1934
1966
|
echo -e "${CYAN}Resetting GNOME Keyring...${NC}"
|
|
1935
|
-
VM_USER="${SUDO_USER:-ubuntu}"
|
|
1967
|
+
VM_USER="${{SUDO_USER:-ubuntu}}"
|
|
1936
1968
|
VM_HOME="/home/$VM_USER"
|
|
1937
1969
|
KEYRING_DIR="$VM_HOME/.local/share/keyrings"
|
|
1938
1970
|
|
|
@@ -1960,11 +1992,11 @@ fix_keyring() {
|
|
|
1960
1992
|
pkill -u "$VM_USER" gnome-keyring-daemon 2>/dev/null || true
|
|
1961
1993
|
|
|
1962
1994
|
echo -e "${GREEN}✅ Keyring reset - log out and back in to create new keyring${NC}"
|
|
1963
|
-
}
|
|
1995
|
+
}}
|
|
1964
1996
|
|
|
1965
|
-
fix_ibus() {
|
|
1997
|
+
fix_ibus() {{
|
|
1966
1998
|
echo -e "${CYAN}Fixing IBus input method...${NC}"
|
|
1967
|
-
VM_USER="${SUDO_USER:-ubuntu}"
|
|
1999
|
+
VM_USER="${{SUDO_USER:-ubuntu}}"
|
|
1968
2000
|
VM_HOME="/home/$VM_USER"
|
|
1969
2001
|
|
|
1970
2002
|
# Create ibus cache directory
|
|
@@ -1980,25 +2012,29 @@ fix_ibus() {
     fi

     echo -e "${GREEN}✅ IBus fixed${NC}"
-}
+}}

-fix_snaps() {
+fix_snaps() {{
     echo -e "${CYAN}Reconnecting snap interfaces...${NC}"
     IFACES="desktop desktop-legacy x11 wayland home network audio-playback audio-record camera opengl"

     for snap in $(snap list --color=never 2>/dev/null | tail -n +2 | awk '{print $1}'); do
-
-
-
-
-
+        case "$snap" in
+            pycharm-community|chromium|firefox|code|slack|spotify)
+                echo "Connecting interfaces for $snap..."
+                IFACES="desktop desktop-legacy x11 wayland home network network-bind audio-playback"
+                for iface in $IFACES; do
+                    snap connect "$snap:$iface" ":$iface" 2>/dev/null || true
+                done
+                ;;
+        esac
     done

     systemctl restart snapd 2>/dev/null || true
     echo -e "${GREEN}✅ Snap interfaces reconnected${NC}"
-}
+}}

-fix_mounts() {
+fix_mounts() {{
     echo -e "${CYAN}Remounting filesystems...${NC}"

     while IFS= read -r line; do
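The new `case` block above replaces the old loop body: for a fixed set of GUI snaps it reconnects common desktop interfaces with `snap connect "$snap:$iface" ":$iface"`, ignoring errors so plugs a snap does not declare are simply skipped. A rough Python equivalent of the same loop, shown only to clarify what the shell does (the snap names and interface list are copied from the diff; the function itself is illustrative):

```python
import subprocess

GUI_SNAPS = {"pycharm-community", "chromium", "firefox", "code", "slack", "spotify"}
IFACES = ["desktop", "desktop-legacy", "x11", "wayland", "home",
          "network", "network-bind", "audio-playback"]

def reconnect_snap_interfaces() -> None:
    """Illustrative re-implementation of the fix_snaps shell loop above."""
    listing = subprocess.run(["snap", "list", "--color=never"],
                             capture_output=True, text=True, check=False)
    installed = [line.split()[0] for line in listing.stdout.splitlines()[1:] if line.split()]
    for snap in installed:
        if snap not in GUI_SNAPS:
            continue
        for iface in IFACES:
            # Ignore failures, e.g. interfaces the snap does not declare.
            subprocess.run(["snap", "connect", f"{snap}:{iface}", f":{iface}"],
                           check=False, capture_output=True)
```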
@@ -2019,9 +2055,9 @@ fix_mounts() {
     done < /etc/fstab

     echo -e "${GREEN}✅ Mounts checked${NC}"
-}
+}}

-fix_all() {
+fix_all() {{
     echo -e "${BOLD}${CYAN}Running all fixes...${NC}"
     echo ""
     fix_permissions
@@ -2035,9 +2071,9 @@ fix_all() {
     fix_mounts
     echo ""
     echo -e "${BOLD}${GREEN}All fixes completed!${NC}"
-}
+}}

-interactive_menu() {
+interactive_menu() {{
     while true; do
         echo ""
         echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
@@ -2071,7 +2107,7 @@ interactive_menu() {
            *) echo -e "${RED}Invalid option${NC}" ;;
        esac
    done
-}
+}}

 # Main
 case "${1:-}" in
@@ -2488,21 +2524,21 @@ if __name__ == "__main__":
             [
                 " - mkdir -p /var/lib/clonebox /mnt/logs",
                 f" - truncate -s 1G {vm_logs_img_path}",
-                f" - mkfs.ext4 -F {vm_logs_img_path}",
+                f" - mkfs.ext4 -F {vm_logs_img_path} >/dev/null 2>&1",
                 f" - echo '{vm_logs_img_path} /mnt/logs ext4 loop,defaults 0 0' >> /etc/fstab",
-                " - mount
+                " - mount /mnt/logs || echo ' → ❌ Failed to mount logs disk'",
                 " - mkdir -p /mnt/logs/var/log",
                 " - mkdir -p /mnt/logs/tmp",
                 " - cp -r /var/log/clonebox*.log /mnt/logs/var/log/ 2>/dev/null || true",
                 " - cp -r /tmp/*-error.log /mnt/logs/tmp/ 2>/dev/null || true",
-                f" - echo 'Logs disk mounted at /mnt/logs
+                f" - echo ' → ✓ Logs disk mounted at /mnt/logs'",
             ]
         )

         # Add reboot command at the end if GUI is enabled
         if config.gui:
-            runcmd_lines.append(" - echo '🔄 Rebooting in 10 seconds to start GUI...'")
-            runcmd_lines.append(" - echo '
+            runcmd_lines.append(" - echo ' → 🔄 Rebooting in 10 seconds to start GUI...'")
+            runcmd_lines.append(" - echo ' → (After reboot, GUI will auto-start)'")
             runcmd_lines.append(" - sleep 10 && reboot")

         runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
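The logs-disk hunk above sets up a loop-mounted scratch filesystem: `truncate -s 1G` creates a sparse backing file, `mkfs.ext4 -F` formats it (its output is now discarded), an `/etc/fstab` entry with the `loop` option binds it to `/mnt/logs`, and the `mount` step now reports an explicit error on failure. A small host-side sketch of the same loopback mechanics, assuming root privileges (paths and the function name are illustrative, not clonebox API):

```python
import subprocess

def make_loopback_log_disk(image_path: str = "/var/lib/clonebox/logs.img",
                           mount_point: str = "/mnt/logs") -> None:
    """Illustrative sketch of the loop-mounted ext4 log disk created by the
    cloud-init snippet above. Must run as root."""
    cmds = [
        ["mkdir", "-p", mount_point],
        ["truncate", "-s", "1G", image_path],               # sparse 1 GiB backing file
        ["mkfs.ext4", "-F", image_path],                    # format it as ext4
        ["mount", "-o", "loop", image_path, mount_point],   # loop-mount it
    ]
    for cmd in cmds:
        subprocess.run(cmd, check=True)
```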
@@ -2604,12 +2640,17 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
         log("[green]✅ VM started![/]")

         if open_viewer:
-
-
-            [
-
-
-
+            import shutil
+            if shutil.which("virt-viewer"):
+                log("[cyan]🖥️ Opening virt-viewer...[/]")
+                subprocess.Popen(
+                    ["virt-viewer", "-c", self.conn_uri, vm_name],
+                    stdout=subprocess.DEVNULL,
+                    stderr=subprocess.DEVNULL,
+                )
+            else:
+                log("[yellow]⚠️ Warning: 'virt-viewer' not found. Cannot open console automatically.[/]")
+                log("[dim] Install it with: sudo apt install virt-viewer[/]")

         return True

@@ -2625,6 +2666,8 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
         try:
             vm = self.conn.lookupByName(vm_name)
         except libvirt.libvirtError:
+            if ignore_not_found:
+                return True
             log(f"[red]❌ VM '{vm_name}' not found[/]")
             return False

@@ -2672,6 +2715,7 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
         delete_storage: bool = True,
         console=None,
         ignore_not_found: bool = False,
+        approved: bool = False,
     ) -> bool:
         """Delete a VM and optionally its storage."""

@@ -2681,9 +2725,18 @@ final_message: "CloneBox VM is ready after $UPTIME seconds"
         else:
             print(msg)

+        policy = PolicyEngine.load_effective()
+        if policy is not None:
+            policy.assert_operation_approved(
+                AuditEventType.VM_DELETE.value,
+                approved=approved,
+            )
+
         try:
             vm = self.conn.lookupByName(vm_name)
         except libvirt.libvirtError:
+            if ignore_not_found:
+                return True
             log(f"[red]❌ VM '{vm_name}' not found[/]")
             return False

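With the changes above, `delete_vm()` accepts an `approved` flag and, when an effective policy is loaded, calls `assert_operation_approved()` for the `VM_DELETE` audit event before touching the domain; it also returns success early when `ignore_not_found=True` and the VM no longer exists. A hedged sketch of how a caller might drive this (the wiring and exception handling below are assumptions, not the actual clonebox/cli.py code):

```python
# Hypothetical caller-side sketch; the real CLI wiring lives in clonebox/cli.py and
# may differ. Assumes assert_operation_approved() raises on an unapproved delete.
def delete_with_policy(cloner, vm_name: str, assume_yes: bool) -> bool:
    try:
        return cloner.delete_vm(
            vm_name,
            delete_storage=True,
            ignore_not_found=True,   # treat "already gone" as success
            approved=assume_yes,     # e.g. wired to a --yes/--force CLI flag
        )
    except Exception as exc:  # e.g. a PolicyViolationError from the policy engine
        print(f"Delete blocked by policy: {exc}")
        return False
```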