clonebox 0.1.21__py3-none-any.whl → 0.1.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clonebox/cli.py +188 -6
- clonebox/cloner.py +1012 -19
- clonebox/validator.py +361 -29
- {clonebox-0.1.21.dist-info → clonebox-0.1.23.dist-info}/METADATA +3 -1
- {clonebox-0.1.21.dist-info → clonebox-0.1.23.dist-info}/RECORD +9 -9
- {clonebox-0.1.21.dist-info → clonebox-0.1.23.dist-info}/WHEEL +0 -0
- {clonebox-0.1.21.dist-info → clonebox-0.1.23.dist-info}/entry_points.txt +0 -0
- {clonebox-0.1.21.dist-info → clonebox-0.1.23.dist-info}/licenses/LICENSE +0 -0
- {clonebox-0.1.21.dist-info → clonebox-0.1.23.dist-info}/top_level.txt +0 -0
clonebox/cloner.py
CHANGED
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
SelectiveVMCloner - Creates isolated VMs with only selected apps/paths/services.
|
|
4
4
|
"""
|
|
5
5
|
|
|
6
|
+
import json
|
|
6
7
|
import os
|
|
7
8
|
import subprocess
|
|
8
9
|
import tempfile
|
|
@@ -13,6 +14,12 @@ from dataclasses import dataclass, field
|
|
|
13
14
|
from pathlib import Path
|
|
14
15
|
from typing import Optional
|
|
15
16
|
|
|
17
|
+
try:
|
|
18
|
+
from dotenv import load_dotenv
|
|
19
|
+
load_dotenv()
|
|
20
|
+
except ImportError:
|
|
21
|
+
pass # dotenv is optional
|
|
22
|
+
|
|
16
23
|
try:
|
|
17
24
|
import libvirt
|
|
18
25
|
except ImportError:
|
|
@@ -33,21 +40,23 @@ DEFAULT_SNAP_INTERFACES = ['desktop', 'desktop-legacy', 'x11', 'home', 'network'
|
|
|
33
40
|
class VMConfig:
|
|
34
41
|
"""Configuration for the VM to create."""
|
|
35
42
|
|
|
36
|
-
name: str = "clonebox-vm"
|
|
37
|
-
ram_mb: int =
|
|
38
|
-
vcpus: int = 4
|
|
39
|
-
disk_size_gb: int = 20
|
|
40
|
-
gui: bool =
|
|
41
|
-
base_image: Optional[str] = None
|
|
43
|
+
name: str = field(default_factory=lambda: os.getenv("VM_NAME", "clonebox-vm"))
|
|
44
|
+
ram_mb: int = field(default_factory=lambda: int(os.getenv("VM_RAM_MB", "8192")))
|
|
45
|
+
vcpus: int = field(default_factory=lambda: int(os.getenv("VM_VCPUS", "4")))
|
|
46
|
+
disk_size_gb: int = field(default_factory=lambda: int(os.getenv("VM_DISK_SIZE_GB", "20")))
|
|
47
|
+
gui: bool = field(default_factory=lambda: os.getenv("VM_GUI", "true").lower() == "true")
|
|
48
|
+
base_image: Optional[str] = field(default_factory=lambda: os.getenv("VM_BASE_IMAGE") or None)
|
|
42
49
|
paths: dict = field(default_factory=dict)
|
|
43
50
|
packages: list = field(default_factory=list)
|
|
44
51
|
snap_packages: list = field(default_factory=list) # Snap packages to install
|
|
45
52
|
services: list = field(default_factory=list)
|
|
46
53
|
post_commands: list = field(default_factory=list) # Commands to run after setup
|
|
47
|
-
user_session: bool =
|
|
48
|
-
network_mode: str = "auto" # auto|default|user
|
|
49
|
-
username: str = "ubuntu" # VM default username
|
|
50
|
-
password: str = "ubuntu" # VM default password
|
|
54
|
+
user_session: bool = field(default_factory=lambda: os.getenv("VM_USER_SESSION", "false").lower() == "true") # Use qemu:///session instead of qemu:///system
|
|
55
|
+
network_mode: str = field(default_factory=lambda: os.getenv("VM_NETWORK_MODE", "auto")) # auto|default|user
|
|
56
|
+
username: str = field(default_factory=lambda: os.getenv("VM_USERNAME", "ubuntu")) # VM default username
|
|
57
|
+
password: str = field(default_factory=lambda: os.getenv("VM_PASSWORD", "ubuntu")) # VM default password
|
|
58
|
+
autostart_apps: bool = field(default_factory=lambda: os.getenv("VM_AUTOSTART_APPS", "true").lower() == "true") # Auto-start GUI apps after login (desktop autostart)
|
|
59
|
+
web_services: list = field(default_factory=list) # Web services to start (uvicorn, etc.)
|
|
51
60
|
|
|
52
61
|
def to_dict(self) -> dict:
|
|
53
62
|
return {
|
|
@@ -63,15 +72,6 @@ class SelectiveVMCloner:
|
|
|
63
72
|
Uses bind mounts instead of full disk cloning.
|
|
64
73
|
"""
|
|
65
74
|
|
|
66
|
-
# Default images directories
|
|
67
|
-
SYSTEM_IMAGES_DIR = Path("/var/lib/libvirt/images")
|
|
68
|
-
USER_IMAGES_DIR = Path.home() / ".local/share/libvirt/images"
|
|
69
|
-
|
|
70
|
-
DEFAULT_BASE_IMAGE_URL = (
|
|
71
|
-
"https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
|
|
72
|
-
)
|
|
73
|
-
DEFAULT_BASE_IMAGE_FILENAME = "clonebox-ubuntu-jammy-amd64.qcow2"
|
|
74
|
-
|
|
75
75
|
def __init__(self, conn_uri: str = None, user_session: bool = False):
|
|
76
76
|
self.user_session = user_session
|
|
77
77
|
if conn_uri:
|
|
@@ -81,6 +81,28 @@ class SelectiveVMCloner:
|
|
|
81
81
|
self.conn = None
|
|
82
82
|
self._connect()
|
|
83
83
|
|
|
84
|
+
@property
|
|
85
|
+
def SYSTEM_IMAGES_DIR(self) -> Path:
|
|
86
|
+
return Path(os.getenv("CLONEBOX_SYSTEM_IMAGES_DIR", "/var/lib/libvirt/images"))
|
|
87
|
+
|
|
88
|
+
@property
|
|
89
|
+
def USER_IMAGES_DIR(self) -> Path:
|
|
90
|
+
return Path(os.getenv("CLONEBOX_USER_IMAGES_DIR", str(Path.home() / ".local/share/libvirt/images"))).expanduser()
|
|
91
|
+
|
|
92
|
+
@property
|
|
93
|
+
def DEFAULT_BASE_IMAGE_URL(self) -> str:
|
|
94
|
+
return os.getenv(
|
|
95
|
+
"CLONEBOX_BASE_IMAGE_URL",
|
|
96
|
+
"https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
@property
|
|
100
|
+
def DEFAULT_BASE_IMAGE_FILENAME(self) -> str:
|
|
101
|
+
return os.getenv(
|
|
102
|
+
"CLONEBOX_BASE_IMAGE_FILENAME",
|
|
103
|
+
"clonebox-ubuntu-jammy-amd64.qcow2"
|
|
104
|
+
)
|
|
105
|
+
|
|
84
106
|
def _connect(self):
|
|
85
107
|
"""Connect to libvirt."""
|
|
86
108
|
if libvirt is None:
|
|
@@ -487,6 +509,308 @@ class SelectiveVMCloner:
|
|
|
487
509
|
|
|
488
510
|
return ET.tostring(root, encoding="unicode")
|
|
489
511
|
|
|
512
|
+
def _generate_boot_diagnostic_script(self, config: VMConfig) -> str:
|
|
513
|
+
"""Generate boot diagnostic script with self-healing capabilities."""
|
|
514
|
+
import base64
|
|
515
|
+
|
|
516
|
+
wants_google_chrome = any(
|
|
517
|
+
p == "/home/ubuntu/.config/google-chrome" for p in (config.paths or {}).values()
|
|
518
|
+
)
|
|
519
|
+
|
|
520
|
+
apt_pkg_list = list(config.packages or [])
|
|
521
|
+
for base_pkg in ["qemu-guest-agent", "cloud-guest-utils"]:
|
|
522
|
+
if base_pkg not in apt_pkg_list:
|
|
523
|
+
apt_pkg_list.insert(0, base_pkg)
|
|
524
|
+
if config.gui:
|
|
525
|
+
for gui_pkg in ["ubuntu-desktop-minimal", "firefox"]:
|
|
526
|
+
if gui_pkg not in apt_pkg_list:
|
|
527
|
+
apt_pkg_list.append(gui_pkg)
|
|
528
|
+
|
|
529
|
+
apt_packages = " ".join(f'"{p}"' for p in apt_pkg_list) if apt_pkg_list else ""
|
|
530
|
+
snap_packages = " ".join(f'"{p}"' for p in config.snap_packages) if config.snap_packages else ""
|
|
531
|
+
services = " ".join(f'"{s}"' for s in config.services) if config.services else ""
|
|
532
|
+
|
|
533
|
+
snap_ifaces_bash = "\n".join(
|
|
534
|
+
f'SNAP_INTERFACES["{snap}"]="{" ".join(ifaces)}"'
|
|
535
|
+
for snap, ifaces in SNAP_INTERFACES.items()
|
|
536
|
+
)
|
|
537
|
+
|
|
538
|
+
script = f'''#!/bin/bash
|
|
539
|
+
set -uo pipefail
|
|
540
|
+
LOG="/var/log/clonebox-boot.log"
|
|
541
|
+
STATUS_KV="/var/run/clonebox-status"
|
|
542
|
+
STATUS_JSON="/var/run/clonebox-status.json"
|
|
543
|
+
MAX_RETRIES=3
|
|
544
|
+
PASSED=0 FAILED=0 REPAIRED=0 TOTAL=0
|
|
545
|
+
|
|
546
|
+
RED='\\033[0;31m' GREEN='\\033[0;32m' YELLOW='\\033[1;33m' CYAN='\\033[0;36m' NC='\\033[0m' BOLD='\\033[1m'
|
|
547
|
+
|
|
548
|
+
log() {{ echo -e "[$(date +%H:%M:%S)] $1" | tee -a "$LOG"; }}
|
|
549
|
+
ok() {{ log "${{GREEN}}✅ $1${{NC}}"; ((PASSED++)); ((TOTAL++)); }}
|
|
550
|
+
fail() {{ log "${{RED}}❌ $1${{NC}}"; ((FAILED++)); ((TOTAL++)); }}
|
|
551
|
+
repair() {{ log "${{YELLOW}}🔧 $1${{NC}}"; }}
|
|
552
|
+
section() {{ log ""; log "${{BOLD}}[$1] $2${{NC}}"; }}
|
|
553
|
+
|
|
554
|
+
write_status() {{
|
|
555
|
+
local phase="$1"
|
|
556
|
+
local current_task="${{2:-}}"
|
|
557
|
+
printf 'passed=%s failed=%s repaired=%s\n' "$PASSED" "$FAILED" "$REPAIRED" > "$STATUS_KV" 2>/dev/null || true
|
|
558
|
+
cat > "$STATUS_JSON" <<EOF
|
|
559
|
+
{{"phase":"$phase","current_task":"$current_task","total":$TOTAL,"passed":$PASSED,"failed":$FAILED,"repaired":$REPAIRED,"timestamp":"$(date -Iseconds)"}}
|
|
560
|
+
EOF
|
|
561
|
+
}}
|
|
562
|
+
|
|
563
|
+
header() {{
|
|
564
|
+
log ""
|
|
565
|
+
log "${{BOLD}}${{CYAN}}═══════════════════════════════════════════════════════════${{NC}}"
|
|
566
|
+
log "${{BOLD}}${{CYAN}} $1${{NC}}"
|
|
567
|
+
log "${{BOLD}}${{CYAN}}═══════════════════════════════════════════════════════════${{NC}}"
|
|
568
|
+
}}
|
|
569
|
+
|
|
570
|
+
declare -A SNAP_INTERFACES
|
|
571
|
+
{snap_ifaces_bash}
|
|
572
|
+
DEFAULT_IFACES="desktop desktop-legacy x11 home network"
|
|
573
|
+
|
|
574
|
+
check_apt() {{
|
|
575
|
+
dpkg -l "$1" 2>/dev/null | grep -q "^ii"
|
|
576
|
+
}}
|
|
577
|
+
|
|
578
|
+
install_apt() {{
|
|
579
|
+
for i in $(seq 1 $MAX_RETRIES); do
|
|
580
|
+
DEBIAN_FRONTEND=noninteractive apt-get install -y "$1" &>>"$LOG" && return 0
|
|
581
|
+
sleep 3
|
|
582
|
+
done
|
|
583
|
+
return 1
|
|
584
|
+
}}
|
|
585
|
+
|
|
586
|
+
check_snap() {{
|
|
587
|
+
snap list "$1" &>/dev/null
|
|
588
|
+
}}
|
|
589
|
+
|
|
590
|
+
install_snap() {{
|
|
591
|
+
timeout 60 snap wait system seed.loaded 2>/dev/null || true
|
|
592
|
+
for i in $(seq 1 $MAX_RETRIES); do
|
|
593
|
+
snap install "$1" --classic &>>"$LOG" && return 0
|
|
594
|
+
snap install "$1" &>>"$LOG" && return 0
|
|
595
|
+
sleep 5
|
|
596
|
+
done
|
|
597
|
+
return 1
|
|
598
|
+
}}
|
|
599
|
+
|
|
600
|
+
connect_interfaces() {{
|
|
601
|
+
local snap="$1"
|
|
602
|
+
local ifaces="${{SNAP_INTERFACES[$snap]:-$DEFAULT_IFACES}}"
|
|
603
|
+
for iface in $ifaces; do
|
|
604
|
+
snap connect "$snap:$iface" ":$iface" 2>/dev/null && log " ${{GREEN}}✓${{NC}} $snap:$iface" || true
|
|
605
|
+
done
|
|
606
|
+
}}
|
|
607
|
+
|
|
608
|
+
test_launch() {{
|
|
609
|
+
case "$1" in
|
|
610
|
+
pycharm-community) /snap/pycharm-community/current/jbr/bin/java -version &>/dev/null ;;
|
|
611
|
+
chromium) timeout 10 chromium --headless=new --dump-dom about:blank &>/dev/null ;;
|
|
612
|
+
firefox) timeout 10 firefox --headless --screenshot /tmp/ff-test.png about:blank &>/dev/null; rm -f /tmp/ff-test.png ;;
|
|
613
|
+
docker) docker info &>/dev/null ;;
|
|
614
|
+
*) command -v "$1" &>/dev/null ;;
|
|
615
|
+
esac
|
|
616
|
+
}}
|
|
617
|
+
|
|
618
|
+
header "CloneBox VM Boot Diagnostic"
|
|
619
|
+
write_status "starting" "boot diagnostic starting"
|
|
620
|
+
|
|
621
|
+
APT_PACKAGES=({apt_packages})
|
|
622
|
+
SNAP_PACKAGES=({snap_packages})
|
|
623
|
+
SERVICES=({services})
|
|
624
|
+
VM_USER="${{SUDO_USER:-ubuntu}}"
|
|
625
|
+
VM_HOME="/home/$VM_USER"
|
|
626
|
+
|
|
627
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
628
|
+
# Section 0: Fix permissions for GNOME directories (runs first!)
|
|
629
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
630
|
+
section "0/7" "Fixing directory permissions..."
|
|
631
|
+
write_status "fixing_permissions" "fixing directory permissions"
|
|
632
|
+
|
|
633
|
+
GNOME_DIRS=(
|
|
634
|
+
"$VM_HOME/.config"
|
|
635
|
+
"$VM_HOME/.config/pulse"
|
|
636
|
+
"$VM_HOME/.config/dconf"
|
|
637
|
+
"$VM_HOME/.config/ibus"
|
|
638
|
+
"$VM_HOME/.cache"
|
|
639
|
+
"$VM_HOME/.cache/ibus"
|
|
640
|
+
"$VM_HOME/.cache/tracker3"
|
|
641
|
+
"$VM_HOME/.cache/mesa_shader_cache"
|
|
642
|
+
"$VM_HOME/.local"
|
|
643
|
+
"$VM_HOME/.local/share"
|
|
644
|
+
"$VM_HOME/.local/share/applications"
|
|
645
|
+
"$VM_HOME/.local/share/keyrings"
|
|
646
|
+
)
|
|
647
|
+
|
|
648
|
+
for dir in "${{GNOME_DIRS[@]}}"; do
|
|
649
|
+
if [ ! -d "$dir" ]; then
|
|
650
|
+
mkdir -p "$dir" 2>/dev/null && log " Created $dir" || true
|
|
651
|
+
fi
|
|
652
|
+
done
|
|
653
|
+
|
|
654
|
+
# Fix ownership for all critical directories
|
|
655
|
+
chown -R 1000:1000 "$VM_HOME/.config" "$VM_HOME/.cache" "$VM_HOME/.local" 2>/dev/null || true
|
|
656
|
+
chmod 700 "$VM_HOME/.config" "$VM_HOME/.cache" 2>/dev/null || true
|
|
657
|
+
|
|
658
|
+
# Fix snap directories ownership
|
|
659
|
+
for snap_dir in "$VM_HOME/snap"/*; do
|
|
660
|
+
[ -d "$snap_dir" ] && chown -R 1000:1000 "$snap_dir" 2>/dev/null || true
|
|
661
|
+
done
|
|
662
|
+
|
|
663
|
+
ok "Directory permissions fixed"
|
|
664
|
+
|
|
665
|
+
section "1/7" "Checking APT packages..."
|
|
666
|
+
write_status "checking_apt" "checking APT packages"
|
|
667
|
+
for pkg in "${{APT_PACKAGES[@]}}"; do
|
|
668
|
+
[ -z "$pkg" ] && continue
|
|
669
|
+
if check_apt "$pkg"; then
|
|
670
|
+
ok "$pkg"
|
|
671
|
+
else
|
|
672
|
+
repair "Installing $pkg..."
|
|
673
|
+
if install_apt "$pkg"; then
|
|
674
|
+
ok "$pkg installed"
|
|
675
|
+
((REPAIRED++))
|
|
676
|
+
else
|
|
677
|
+
fail "$pkg FAILED"
|
|
678
|
+
fi
|
|
679
|
+
fi
|
|
680
|
+
done
|
|
681
|
+
|
|
682
|
+
section "2/7" "Checking Snap packages..."
|
|
683
|
+
write_status "checking_snaps" "checking snap packages"
|
|
684
|
+
timeout 120 snap wait system seed.loaded 2>/dev/null || true
|
|
685
|
+
for pkg in "${{SNAP_PACKAGES[@]}}"; do
|
|
686
|
+
[ -z "$pkg" ] && continue
|
|
687
|
+
if check_snap "$pkg"; then
|
|
688
|
+
ok "$pkg (snap)"
|
|
689
|
+
else
|
|
690
|
+
repair "Installing $pkg..."
|
|
691
|
+
if install_snap "$pkg"; then
|
|
692
|
+
ok "$pkg installed"
|
|
693
|
+
((REPAIRED++))
|
|
694
|
+
else
|
|
695
|
+
fail "$pkg FAILED"
|
|
696
|
+
fi
|
|
697
|
+
fi
|
|
698
|
+
done
|
|
699
|
+
|
|
700
|
+
section "3/7" "Connecting Snap interfaces..."
|
|
701
|
+
write_status "connecting_interfaces" "connecting snap interfaces"
|
|
702
|
+
for pkg in "${{SNAP_PACKAGES[@]}}"; do
|
|
703
|
+
[ -z "$pkg" ] && continue
|
|
704
|
+
check_snap "$pkg" && connect_interfaces "$pkg"
|
|
705
|
+
done
|
|
706
|
+
systemctl restart snapd 2>/dev/null || true
|
|
707
|
+
|
|
708
|
+
section "4/7" "Testing application launch..."
|
|
709
|
+
write_status "testing_launch" "testing application launch"
|
|
710
|
+
APPS_TO_TEST=()
|
|
711
|
+
for pkg in "${{SNAP_PACKAGES[@]}}"; do
|
|
712
|
+
[ -z "$pkg" ] && continue
|
|
713
|
+
APPS_TO_TEST+=("$pkg")
|
|
714
|
+
done
|
|
715
|
+
if [ "{str(wants_google_chrome).lower()}" = "true" ]; then
|
|
716
|
+
APPS_TO_TEST+=("google-chrome")
|
|
717
|
+
fi
|
|
718
|
+
if printf '%s\n' "${{APT_PACKAGES[@]}}" | grep -qx "docker.io"; then
|
|
719
|
+
APPS_TO_TEST+=("docker")
|
|
720
|
+
fi
|
|
721
|
+
|
|
722
|
+
for app in "${{APPS_TO_TEST[@]}}"; do
|
|
723
|
+
[ -z "$app" ] && continue
|
|
724
|
+
case "$app" in
|
|
725
|
+
google-chrome)
|
|
726
|
+
if ! command -v google-chrome >/dev/null 2>&1 && ! command -v google-chrome-stable >/dev/null 2>&1; then
|
|
727
|
+
repair "Installing google-chrome..."
|
|
728
|
+
tmp_deb="/tmp/google-chrome-stable_current_amd64.deb"
|
|
729
|
+
if curl -fsSL -o "$tmp_deb" "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb" \
|
|
730
|
+
&& DEBIAN_FRONTEND=noninteractive apt-get install -y "$tmp_deb" &>>"$LOG"; then
|
|
731
|
+
rm -f "$tmp_deb"
|
|
732
|
+
((REPAIRED++))
|
|
733
|
+
else
|
|
734
|
+
rm -f "$tmp_deb" 2>/dev/null || true
|
|
735
|
+
fi
|
|
736
|
+
fi
|
|
737
|
+
;;
|
|
738
|
+
docker)
|
|
739
|
+
check_apt "docker.io" || continue
|
|
740
|
+
;;
|
|
741
|
+
*)
|
|
742
|
+
if check_snap "$app"; then
|
|
743
|
+
:
|
|
744
|
+
else
|
|
745
|
+
continue
|
|
746
|
+
fi
|
|
747
|
+
;;
|
|
748
|
+
esac
|
|
749
|
+
|
|
750
|
+
if test_launch "$app"; then
|
|
751
|
+
ok "$app launches OK"
|
|
752
|
+
else
|
|
753
|
+
fail "$app launch test FAILED"
|
|
754
|
+
fi
|
|
755
|
+
done
|
|
756
|
+
|
|
757
|
+
section "5/7" "Checking mount points..."
|
|
758
|
+
write_status "checking_mounts" "checking mount points"
|
|
759
|
+
while IFS= read -r line; do
|
|
760
|
+
tag=$(echo "$line" | awk '{{print $1}}')
|
|
761
|
+
mp=$(echo "$line" | awk '{{print $2}}')
|
|
762
|
+
if [[ "$tag" =~ ^mount[0-9]+$ ]] && [[ "$mp" == /* ]]; then
|
|
763
|
+
if mountpoint -q "$mp" 2>/dev/null; then
|
|
764
|
+
ok "$mp mounted"
|
|
765
|
+
else
|
|
766
|
+
repair "Mounting $mp..."
|
|
767
|
+
mkdir -p "$mp" 2>/dev/null || true
|
|
768
|
+
if mount "$mp" &>>"$LOG"; then
|
|
769
|
+
ok "$mp mounted"
|
|
770
|
+
((REPAIRED++))
|
|
771
|
+
else
|
|
772
|
+
fail "$mp mount FAILED"
|
|
773
|
+
fi
|
|
774
|
+
fi
|
|
775
|
+
fi
|
|
776
|
+
done < /etc/fstab
|
|
777
|
+
|
|
778
|
+
section "6/7" "Checking services..."
|
|
779
|
+
write_status "checking_services" "checking services"
|
|
780
|
+
for svc in "${{SERVICES[@]}}"; do
|
|
781
|
+
[ -z "$svc" ] && continue
|
|
782
|
+
if systemctl is-active "$svc" &>/dev/null; then
|
|
783
|
+
ok "$svc running"
|
|
784
|
+
else
|
|
785
|
+
repair "Starting $svc..."
|
|
786
|
+
systemctl enable --now "$svc" &>/dev/null && ok "$svc started" && ((REPAIRED++)) || fail "$svc FAILED"
|
|
787
|
+
fi
|
|
788
|
+
done
|
|
789
|
+
|
|
790
|
+
header "Diagnostic Summary"
|
|
791
|
+
log ""
|
|
792
|
+
log " Total: $TOTAL"
|
|
793
|
+
log " ${{GREEN}}Passed:${{NC}} $PASSED"
|
|
794
|
+
log " ${{YELLOW}}Repaired:${{NC}} $REPAIRED"
|
|
795
|
+
log " ${{RED}}Failed:${{NC}} $FAILED"
|
|
796
|
+
log ""
|
|
797
|
+
|
|
798
|
+
write_status "complete" "complete"
|
|
799
|
+
|
|
800
|
+
if [ $FAILED -eq 0 ]; then
|
|
801
|
+
log "${{GREEN}}${{BOLD}}═══════════════════════════════════════════════════════════${{NC}}"
|
|
802
|
+
log "${{GREEN}}${{BOLD}} ✅ All checks passed! CloneBox VM is ready.${{NC}}"
|
|
803
|
+
log "${{GREEN}}${{BOLD}}═══════════════════════════════════════════════════════════${{NC}}"
|
|
804
|
+
exit 0
|
|
805
|
+
else
|
|
806
|
+
log "${{RED}}${{BOLD}}═══════════════════════════════════════════════════════════${{NC}}"
|
|
807
|
+
log "${{RED}}${{BOLD}} ⚠️ $FAILED checks failed. See /var/log/clonebox-boot.log${{NC}}"
|
|
808
|
+
log "${{RED}}${{BOLD}}═══════════════════════════════════════════════════════════${{NC}}"
|
|
809
|
+
exit 1
|
|
810
|
+
fi
|
|
811
|
+
'''
|
|
812
|
+
return base64.b64encode(script.encode()).decode()
|
|
813
|
+
|
|
490
814
|
def _generate_health_check_script(self, config: VMConfig) -> str:
|
|
491
815
|
"""Generate a health check script that validates all installed components."""
|
|
492
816
|
import base64
|
|
@@ -741,10 +1065,60 @@ fi
|
|
|
741
1065
|
|
|
742
1066
|
# Add GUI setup if enabled - runs AFTER package installation completes
|
|
743
1067
|
if config.gui:
|
|
1068
|
+
# Create directories that GNOME services need BEFORE GUI starts
|
|
1069
|
+
# These may conflict with mounted host directories, so ensure they exist with correct perms
|
|
744
1070
|
runcmd_lines.extend([
|
|
1071
|
+
" - mkdir -p /home/ubuntu/.config/pulse /home/ubuntu/.cache/ibus /home/ubuntu/.local/share",
|
|
1072
|
+
" - mkdir -p /home/ubuntu/.config/dconf /home/ubuntu/.cache/tracker3",
|
|
1073
|
+
" - mkdir -p /home/ubuntu/.config/autostart",
|
|
1074
|
+
" - chown -R 1000:1000 /home/ubuntu/.config /home/ubuntu/.cache /home/ubuntu/.local",
|
|
1075
|
+
" - chmod 700 /home/ubuntu/.config /home/ubuntu/.cache",
|
|
745
1076
|
" - systemctl set-default graphical.target",
|
|
746
1077
|
" - systemctl enable gdm3 || systemctl enable gdm || true",
|
|
747
1078
|
])
|
|
1079
|
+
|
|
1080
|
+
# Create autostart entries for GUI apps
|
|
1081
|
+
autostart_apps = {
|
|
1082
|
+
'pycharm-community': ('PyCharm Community', '/snap/bin/pycharm-community', 'pycharm-community'),
|
|
1083
|
+
'firefox': ('Firefox', '/snap/bin/firefox', 'firefox'),
|
|
1084
|
+
'chromium': ('Chromium', '/snap/bin/chromium', 'chromium'),
|
|
1085
|
+
'google-chrome': ('Google Chrome', 'google-chrome-stable', 'google-chrome'),
|
|
1086
|
+
}
|
|
1087
|
+
|
|
1088
|
+
for snap_pkg in config.snap_packages:
|
|
1089
|
+
if snap_pkg in autostart_apps:
|
|
1090
|
+
name, exec_cmd, icon = autostart_apps[snap_pkg]
|
|
1091
|
+
desktop_entry = f'''[Desktop Entry]
|
|
1092
|
+
Type=Application
|
|
1093
|
+
Name={name}
|
|
1094
|
+
Exec={exec_cmd}
|
|
1095
|
+
Icon={icon}
|
|
1096
|
+
X-GNOME-Autostart-enabled=true
|
|
1097
|
+
X-GNOME-Autostart-Delay=5
|
|
1098
|
+
Comment=CloneBox autostart
|
|
1099
|
+
'''
|
|
1100
|
+
import base64
|
|
1101
|
+
desktop_b64 = base64.b64encode(desktop_entry.encode()).decode()
|
|
1102
|
+
runcmd_lines.append(f" - echo '{desktop_b64}' | base64 -d > /home/ubuntu/.config/autostart/{snap_pkg}.desktop")
|
|
1103
|
+
|
|
1104
|
+
# Check if google-chrome is in paths (app_data_paths)
|
|
1105
|
+
wants_chrome = any('/google-chrome' in str(p) for p in (config.paths or {}).values())
|
|
1106
|
+
if wants_chrome:
|
|
1107
|
+
name, exec_cmd, icon = autostart_apps['google-chrome']
|
|
1108
|
+
desktop_entry = f'''[Desktop Entry]
|
|
1109
|
+
Type=Application
|
|
1110
|
+
Name={name}
|
|
1111
|
+
Exec={exec_cmd}
|
|
1112
|
+
Icon={icon}
|
|
1113
|
+
X-GNOME-Autostart-enabled=true
|
|
1114
|
+
X-GNOME-Autostart-Delay=5
|
|
1115
|
+
Comment=CloneBox autostart
|
|
1116
|
+
'''
|
|
1117
|
+
desktop_b64 = base64.b64encode(desktop_entry.encode()).decode()
|
|
1118
|
+
runcmd_lines.append(f" - echo '{desktop_b64}' | base64 -d > /home/ubuntu/.config/autostart/google-chrome.desktop")
|
|
1119
|
+
|
|
1120
|
+
# Fix ownership of autostart directory
|
|
1121
|
+
runcmd_lines.append(" - chown -R 1000:1000 /home/ubuntu/.config/autostart")
|
|
748
1122
|
|
|
749
1123
|
# Run user-defined post commands
|
|
750
1124
|
if config.post_commands:
|
|
@@ -759,6 +1133,625 @@ fi
|
|
|
759
1133
|
runcmd_lines.append(" - /usr/local/bin/clonebox-health >> /var/log/clonebox-health.log 2>&1")
|
|
760
1134
|
runcmd_lines.append(" - echo 'CloneBox VM ready!' > /var/log/clonebox-ready")
|
|
761
1135
|
|
|
1136
|
+
# Generate boot diagnostic script (self-healing)
|
|
1137
|
+
boot_diag_script = self._generate_boot_diagnostic_script(config)
|
|
1138
|
+
runcmd_lines.append(f" - echo '{boot_diag_script}' | base64 -d > /usr/local/bin/clonebox-boot-diagnostic")
|
|
1139
|
+
runcmd_lines.append(" - chmod +x /usr/local/bin/clonebox-boot-diagnostic")
|
|
1140
|
+
|
|
1141
|
+
# Create systemd service for boot diagnostic (runs before GDM on subsequent boots)
|
|
1142
|
+
systemd_service = '''[Unit]
|
|
1143
|
+
Description=CloneBox Boot Diagnostic
|
|
1144
|
+
After=network-online.target snapd.service
|
|
1145
|
+
Before=gdm.service display-manager.service
|
|
1146
|
+
Wants=network-online.target
|
|
1147
|
+
|
|
1148
|
+
[Service]
|
|
1149
|
+
Type=oneshot
|
|
1150
|
+
ExecStart=/usr/local/bin/clonebox-boot-diagnostic
|
|
1151
|
+
StandardOutput=journal+console
|
|
1152
|
+
StandardError=journal+console
|
|
1153
|
+
TTYPath=/dev/tty1
|
|
1154
|
+
TTYReset=yes
|
|
1155
|
+
TTYVHangup=yes
|
|
1156
|
+
RemainAfterExit=yes
|
|
1157
|
+
TimeoutStartSec=600
|
|
1158
|
+
|
|
1159
|
+
[Install]
|
|
1160
|
+
WantedBy=multi-user.target'''
|
|
1161
|
+
import base64
|
|
1162
|
+
systemd_b64 = base64.b64encode(systemd_service.encode()).decode()
|
|
1163
|
+
runcmd_lines.append(f" - echo '{systemd_b64}' | base64 -d > /etc/systemd/system/clonebox-diagnostic.service")
|
|
1164
|
+
runcmd_lines.append(" - systemctl daemon-reload")
|
|
1165
|
+
runcmd_lines.append(" - systemctl enable clonebox-diagnostic.service")
|
|
1166
|
+
runcmd_lines.append(" - systemctl start clonebox-diagnostic.service || true")
|
|
1167
|
+
|
|
1168
|
+
# Create MOTD banner
|
|
1169
|
+
motd_banner = '''#!/bin/bash
|
|
1170
|
+
S="/var/run/clonebox-status"
|
|
1171
|
+
echo ""
|
|
1172
|
+
echo -e "\\033[1;34m═══════════════════════════════════════════════════════════\\033[0m"
|
|
1173
|
+
echo -e "\\033[1;34m CloneBox VM Status\\033[0m"
|
|
1174
|
+
echo -e "\\033[1;34m═══════════════════════════════════════════════════════════\\033[0m"
|
|
1175
|
+
if [ -f "$S" ]; then
|
|
1176
|
+
source "$S"
|
|
1177
|
+
if [ "${failed:-0}" -eq 0 ]; then
|
|
1178
|
+
echo -e " \\033[0;32m✅ All systems operational\\033[0m"
|
|
1179
|
+
else
|
|
1180
|
+
echo -e " \\033[0;31m⚠️ $failed checks failed\\033[0m"
|
|
1181
|
+
fi
|
|
1182
|
+
echo -e " Passed: ${passed:-0} | Repaired: ${repaired:-0} | Failed: ${failed:-0}"
|
|
1183
|
+
fi
|
|
1184
|
+
echo -e " Log: /var/log/clonebox-boot.log"
|
|
1185
|
+
echo -e "\\033[1;34m═══════════════════════════════════════════════════════════\\033[0m"
|
|
1186
|
+
echo ""'''
|
|
1187
|
+
motd_b64 = base64.b64encode(motd_banner.encode()).decode()
|
|
1188
|
+
runcmd_lines.append(f" - echo '{motd_b64}' | base64 -d > /etc/update-motd.d/99-clonebox")
|
|
1189
|
+
runcmd_lines.append(" - chmod +x /etc/update-motd.d/99-clonebox")
|
|
1190
|
+
|
|
1191
|
+
# Create user-friendly clonebox-repair script
|
|
1192
|
+
repair_script = r'''#!/bin/bash
|
|
1193
|
+
# CloneBox Repair - User-friendly repair utility for CloneBox VMs
|
|
1194
|
+
# Usage: clonebox-repair [--auto|--status|--logs|--help]
|
|
1195
|
+
|
|
1196
|
+
set -uo pipefail
|
|
1197
|
+
|
|
1198
|
+
RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' CYAN='\033[0;36m' NC='\033[0m' BOLD='\033[1m'
|
|
1199
|
+
|
|
1200
|
+
show_help() {
|
|
1201
|
+
echo -e "${BOLD}${CYAN}CloneBox Repair Utility${NC}"
|
|
1202
|
+
echo ""
|
|
1203
|
+
echo "Usage: clonebox-repair [OPTION]"
|
|
1204
|
+
echo ""
|
|
1205
|
+
echo "Options:"
|
|
1206
|
+
echo " --auto Run full automatic repair (same as boot diagnostic)"
|
|
1207
|
+
echo " --status Show current CloneBox status"
|
|
1208
|
+
echo " --logs Show recent repair logs"
|
|
1209
|
+
echo " --perms Fix directory permissions only"
|
|
1210
|
+
echo " --audio Fix audio (PulseAudio) and restart"
|
|
1211
|
+
echo " --keyring Reset GNOME Keyring (fixes password mismatch)"
|
|
1212
|
+
echo " --snaps Reconnect all snap interfaces only"
|
|
1213
|
+
echo " --mounts Remount all 9p filesystems only"
|
|
1214
|
+
echo " --all Run all fixes (perms + audio + snaps + mounts)"
|
|
1215
|
+
echo " --help Show this help message"
|
|
1216
|
+
echo ""
|
|
1217
|
+
echo "Without options, shows interactive menu."
|
|
1218
|
+
}
|
|
1219
|
+
|
|
1220
|
+
show_status() {
|
|
1221
|
+
echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
|
|
1222
|
+
echo -e "${BOLD}${CYAN} CloneBox VM Status${NC}"
|
|
1223
|
+
echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
|
|
1224
|
+
|
|
1225
|
+
if [ -f /var/run/clonebox-status ]; then
|
|
1226
|
+
source /var/run/clonebox-status
|
|
1227
|
+
if [ "${failed:-0}" -eq 0 ]; then
|
|
1228
|
+
echo -e " ${GREEN}✅ All systems operational${NC}"
|
|
1229
|
+
else
|
|
1230
|
+
echo -e " ${RED}⚠️ $failed checks failed${NC}"
|
|
1231
|
+
fi
|
|
1232
|
+
echo -e " Passed: ${passed:-0} | Repaired: ${repaired:-0} | Failed: ${failed:-0}"
|
|
1233
|
+
else
|
|
1234
|
+
echo -e " ${YELLOW}No status information available${NC}"
|
|
1235
|
+
fi
|
|
1236
|
+
echo ""
|
|
1237
|
+
echo -e " Last boot diagnostic: $(stat -c %y /var/log/clonebox-boot.log 2>/dev/null || echo 'never')"
|
|
1238
|
+
echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
|
|
1239
|
+
}
|
|
1240
|
+
|
|
1241
|
+
show_logs() {
|
|
1242
|
+
echo -e "${BOLD}Recent repair logs:${NC}"
|
|
1243
|
+
echo ""
|
|
1244
|
+
tail -n 50 /var/log/clonebox-boot.log 2>/dev/null || echo "No logs found"
|
|
1245
|
+
}
|
|
1246
|
+
|
|
1247
|
+
fix_permissions() {
|
|
1248
|
+
echo -e "${CYAN}Fixing directory permissions...${NC}"
|
|
1249
|
+
VM_USER="${SUDO_USER:-ubuntu}"
|
|
1250
|
+
VM_HOME="/home/$VM_USER"
|
|
1251
|
+
|
|
1252
|
+
DIRS_TO_CREATE=(
|
|
1253
|
+
"$VM_HOME/.config"
|
|
1254
|
+
"$VM_HOME/.config/pulse"
|
|
1255
|
+
"$VM_HOME/.config/dconf"
|
|
1256
|
+
"$VM_HOME/.config/ibus"
|
|
1257
|
+
"$VM_HOME/.cache"
|
|
1258
|
+
"$VM_HOME/.cache/ibus"
|
|
1259
|
+
"$VM_HOME/.cache/tracker3"
|
|
1260
|
+
"$VM_HOME/.cache/mesa_shader_cache"
|
|
1261
|
+
"$VM_HOME/.local"
|
|
1262
|
+
"$VM_HOME/.local/share"
|
|
1263
|
+
"$VM_HOME/.local/share/applications"
|
|
1264
|
+
"$VM_HOME/.local/share/keyrings"
|
|
1265
|
+
)
|
|
1266
|
+
|
|
1267
|
+
for dir in "${DIRS_TO_CREATE[@]}"; do
|
|
1268
|
+
if [ ! -d "$dir" ]; then
|
|
1269
|
+
mkdir -p "$dir" 2>/dev/null && echo " Created $dir"
|
|
1270
|
+
fi
|
|
1271
|
+
done
|
|
1272
|
+
|
|
1273
|
+
chown -R 1000:1000 "$VM_HOME/.config" "$VM_HOME/.cache" "$VM_HOME/.local" 2>/dev/null
|
|
1274
|
+
chmod 700 "$VM_HOME/.config" "$VM_HOME/.cache" 2>/dev/null
|
|
1275
|
+
|
|
1276
|
+
for snap_dir in "$VM_HOME/snap"/*; do
|
|
1277
|
+
[ -d "$snap_dir" ] && chown -R 1000:1000 "$snap_dir" 2>/dev/null
|
|
1278
|
+
done
|
|
1279
|
+
|
|
1280
|
+
echo -e "${GREEN}✅ Permissions fixed${NC}"
|
|
1281
|
+
}
|
|
1282
|
+
|
|
1283
|
+
fix_audio() {
|
|
1284
|
+
echo -e "${CYAN}Fixing audio (PulseAudio/PipeWire)...${NC}"
|
|
1285
|
+
VM_USER="${SUDO_USER:-ubuntu}"
|
|
1286
|
+
VM_HOME="/home/$VM_USER"
|
|
1287
|
+
|
|
1288
|
+
# Create pulse config directory with correct permissions
|
|
1289
|
+
mkdir -p "$VM_HOME/.config/pulse" 2>/dev/null
|
|
1290
|
+
chown -R 1000:1000 "$VM_HOME/.config/pulse" 2>/dev/null
|
|
1291
|
+
chmod 700 "$VM_HOME/.config/pulse" 2>/dev/null
|
|
1292
|
+
|
|
1293
|
+
# Kill and restart audio services as user
|
|
1294
|
+
if [ -n "$SUDO_USER" ]; then
|
|
1295
|
+
sudo -u "$SUDO_USER" pulseaudio --kill 2>/dev/null || true
|
|
1296
|
+
sleep 1
|
|
1297
|
+
sudo -u "$SUDO_USER" pulseaudio --start 2>/dev/null || true
|
|
1298
|
+
echo " Restarted PulseAudio for $SUDO_USER"
|
|
1299
|
+
else
|
|
1300
|
+
pulseaudio --kill 2>/dev/null || true
|
|
1301
|
+
sleep 1
|
|
1302
|
+
pulseaudio --start 2>/dev/null || true
|
|
1303
|
+
echo " Restarted PulseAudio"
|
|
1304
|
+
fi
|
|
1305
|
+
|
|
1306
|
+
# Restart pipewire if available
|
|
1307
|
+
systemctl --user restart pipewire pipewire-pulse 2>/dev/null || true
|
|
1308
|
+
|
|
1309
|
+
echo -e "${GREEN}✅ Audio fixed${NC}"
|
|
1310
|
+
}
|
|
1311
|
+
|
|
1312
|
+
fix_keyring() {
|
|
1313
|
+
echo -e "${CYAN}Resetting GNOME Keyring...${NC}"
|
|
1314
|
+
VM_USER="${SUDO_USER:-ubuntu}"
|
|
1315
|
+
VM_HOME="/home/$VM_USER"
|
|
1316
|
+
KEYRING_DIR="$VM_HOME/.local/share/keyrings"
|
|
1317
|
+
|
|
1318
|
+
echo -e "${YELLOW}⚠️ This will delete existing keyrings and create a new one on next login${NC}"
|
|
1319
|
+
echo -e "${YELLOW} Stored passwords (WiFi, Chrome, etc.) will be lost!${NC}"
|
|
1320
|
+
|
|
1321
|
+
if [ -t 0 ]; then
|
|
1322
|
+
read -rp "Continue? [y/N] " confirm
|
|
1323
|
+
[[ "$confirm" != [yY]* ]] && { echo "Cancelled"; return; }
|
|
1324
|
+
fi
|
|
1325
|
+
|
|
1326
|
+
# Backup old keyrings
|
|
1327
|
+
if [ -d "$KEYRING_DIR" ] && [ "$(ls -A "$KEYRING_DIR" 2>/dev/null)" ]; then
|
|
1328
|
+
backup_dir="$VM_HOME/.local/share/keyrings.backup.$(date +%Y%m%d%H%M%S)"
|
|
1329
|
+
mv "$KEYRING_DIR" "$backup_dir" 2>/dev/null
|
|
1330
|
+
echo " Backed up to $backup_dir"
|
|
1331
|
+
fi
|
|
1332
|
+
|
|
1333
|
+
# Create fresh keyring directory
|
|
1334
|
+
mkdir -p "$KEYRING_DIR" 2>/dev/null
|
|
1335
|
+
chown -R 1000:1000 "$KEYRING_DIR" 2>/dev/null
|
|
1336
|
+
chmod 700 "$KEYRING_DIR" 2>/dev/null
|
|
1337
|
+
|
|
1338
|
+
# Kill gnome-keyring-daemon to force restart on next login
|
|
1339
|
+
pkill -u "$VM_USER" gnome-keyring-daemon 2>/dev/null || true
|
|
1340
|
+
|
|
1341
|
+
echo -e "${GREEN}✅ Keyring reset - log out and back in to create new keyring${NC}"
|
|
1342
|
+
}
|
|
1343
|
+
|
|
1344
|
+
# fix_ibus: repair the IBus input-method setup for the VM user.
# Recreates the per-user IBus cache directory with correct ownership and
# permissions, then restarts the ibus daemon so it picks the directory up.
fix_ibus() {
    echo -e "${CYAN}Fixing IBus input method...${NC}"
    # Target the invoking (sudo) user; fall back to the default "ubuntu" user.
    VM_USER="${SUDO_USER:-ubuntu}"
    VM_HOME="/home/$VM_USER"

    # Create ibus cache directory (missing/mis-owned cache breaks IBus startup)
    mkdir -p "$VM_HOME/.cache/ibus" 2>/dev/null
    # NOTE(review): uid/gid 1000 is hard-coded here — assumes $VM_USER is the
    # first created user (uid 1000), consistent with the rest of this script.
    chown -R 1000:1000 "$VM_HOME/.cache/ibus" 2>/dev/null
    chmod 700 "$VM_HOME/.cache/ibus" 2>/dev/null

    # Restart ibus as the real desktop user when running under sudo,
    # otherwise restart it directly. Failures are tolerated (|| true).
    if [ -n "$SUDO_USER" ]; then
        sudo -u "$SUDO_USER" ibus restart 2>/dev/null || true
    else
        ibus restart 2>/dev/null || true
    fi

    echo -e "${GREEN}✅ IBus fixed${NC}"
}
|
|
1363
|
+
|
|
1364
|
+
# fix_snaps: re-connect common desktop/media snap interfaces for every
# installed application snap, then restart snapd.
# Cloned VMs can lose interface connections; this best-effort pass tries to
# connect each listed interface on each non-infrastructure snap.
fix_snaps() {
    echo -e "${CYAN}Reconnecting snap interfaces...${NC}"
    # Interfaces worth reconnecting for GUI apps (display, files, network, AV).
    IFACES="desktop desktop-legacy x11 wayland home network audio-playback audio-record camera opengl"

    # First column of `snap list` (header skipped) is the snap name.
    for snap in $(snap list --color=never 2>/dev/null | tail -n +2 | awk '{print $1}'); do
        # Skip runtime/infrastructure snaps (core*, snapd, gnome-*, gtk-*, mesa-*).
        [[ "$snap" =~ ^(core|snapd|gnome-|gtk-|mesa-) ]] && continue
        echo -e " ${YELLOW}$snap${NC}"
        for iface in $IFACES; do
            # Connect the snap's plug to the system slot of the same name;
            # silently ignore interfaces the snap does not declare.
            snap connect "$snap:$iface" ":$iface" 2>/dev/null && echo " ✓ $iface" || true
        done
    done

    # Restart snapd so the new connections take effect; tolerate failure.
    systemctl restart snapd 2>/dev/null || true
    echo -e "${GREEN}✅ Snap interfaces reconnected${NC}"
}
|
|
1379
|
+
|
|
1380
|
+
# fix_mounts: ensure all CloneBox-managed fstab entries are mounted.
# Scans /etc/fstab for entries whose source tag matches mount<N> (the naming
# used for CloneBox shared-folder mounts) and mounts any that are not active,
# creating the mountpoint directory if needed.
fix_mounts() {
    echo -e "${CYAN}Remounting filesystems...${NC}"

    while IFS= read -r line; do
        # Field 1: device/tag, field 2: mountpoint (fstab layout).
        tag=$(echo "$line" | awk '{print $1}')
        mp=$(echo "$line" | awk '{print $2}')
        # Only handle CloneBox tags (mount0, mount1, ...) with absolute paths;
        # this also skips comments and blank lines.
        if [[ "$tag" =~ ^mount[0-9]+$ ]] && [[ "$mp" == /* ]]; then
            if ! mountpoint -q "$mp" 2>/dev/null; then
                mkdir -p "$mp" 2>/dev/null
                # `mount <dir>` looks the entry up in fstab itself.
                if mount "$mp" 2>/dev/null; then
                    echo -e " ${GREEN}✓${NC} $mp"
                else
                    echo -e " ${RED}✗${NC} $mp (failed)"
                fi
            else
                echo -e " ${GREEN}✓${NC} $mp (already mounted)"
            fi
        fi
    done < /etc/fstab

    echo -e "${GREEN}✅ Mounts checked${NC}"
}
|
|
1402
|
+
|
|
1403
|
+
# fix_all: run every quick fix in sequence
# (permissions, audio, IBus, snap interfaces, mounts).
# Does NOT include fix_keyring, which is destructive and prompts separately.
fix_all() {
    echo -e "${BOLD}${CYAN}Running all fixes...${NC}"
    echo ""
    fix_permissions
    echo ""
    fix_audio
    echo ""
    fix_ibus
    echo ""
    fix_snaps
    echo ""
    fix_mounts
    echo ""
    echo -e "${BOLD}${GREEN}All fixes completed!${NC}"
}
|
|
1418
|
+
|
|
1419
|
+
# interactive_menu: text-mode repair menu, looped until the user quits.
# Each option dispatches to one of the fix_*/show_* helpers defined in this
# script (or to the separate boot-diagnostic binary for option 1).
interactive_menu() {
    while true; do
        echo ""
        echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
        echo -e "${BOLD}${CYAN}  CloneBox Repair Menu${NC}"
        echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
        echo ""
        echo "  1) Run full automatic repair (boot diagnostic)"
        echo "  2) Run all quick fixes (perms + audio + snaps + mounts)"
        echo "  3) Fix permissions only"
        echo "  4) Fix audio (PulseAudio) only"
        echo "  5) Reset GNOME Keyring (⚠️  deletes saved passwords)"
        echo "  6) Reconnect snap interfaces only"
        echo "  7) Remount filesystems only"
        echo "  8) Show status"
        echo "  9) Show logs"
        echo "  q) Quit"
        echo ""
        read -rp "Select option: " choice

        case "$choice" in
            # Full diagnostic requires root, hence the explicit sudo.
            1) sudo /usr/local/bin/clonebox-boot-diagnostic ;;
            2) fix_all ;;
            3) fix_permissions ;;
            4) fix_audio ;;
            5) fix_keyring ;;
            6) fix_snaps ;;
            7) fix_mounts ;;
            8) show_status ;;
            9) show_logs ;;
            q|Q) exit 0 ;;
            *) echo -e "${RED}Invalid option${NC}" ;;
        esac
    done
}
|
|
1454
|
+
|
|
1455
|
+
# Main entry point: dispatch on the first CLI argument.
# No argument -> interactive menu; unknown argument -> help + non-zero exit.
case "${1:-}" in
    # --auto replaces this process with the root-level boot diagnostic.
    --auto) exec sudo /usr/local/bin/clonebox-boot-diagnostic ;;
    --all) fix_all ;;
    --status) show_status ;;
    --logs) show_logs ;;
    --perms) fix_permissions ;;
    --audio) fix_audio ;;
    --keyring) fix_keyring ;;
    --snaps) fix_snaps ;;
    --mounts) fix_mounts ;;
    --help|-h) show_help ;;
    "") interactive_menu ;;
    *) show_help; exit 1 ;;
esac
|
|
1470
|
+
'''
|
|
1471
|
+
repair_b64 = base64.b64encode(repair_script.encode()).decode()
|
|
1472
|
+
runcmd_lines.append(f" - echo '{repair_b64}' | base64 -d > /usr/local/bin/clonebox-repair")
|
|
1473
|
+
runcmd_lines.append(" - chmod +x /usr/local/bin/clonebox-repair")
|
|
1474
|
+
runcmd_lines.append(" - ln -sf /usr/local/bin/clonebox-repair /usr/local/bin/cb-repair")
|
|
1475
|
+
|
|
1476
|
+
# === AUTOSTART: Systemd user services + Desktop autostart files ===
|
|
1477
|
+
# Create directories for user systemd services and autostart
|
|
1478
|
+
runcmd_lines.append(f" - mkdir -p /home/{config.username}/.config/systemd/user")
|
|
1479
|
+
runcmd_lines.append(f" - mkdir -p /home/{config.username}/.config/autostart")
|
|
1480
|
+
|
|
1481
|
+
# Enable lingering for the user (allows user services to run without login)
|
|
1482
|
+
runcmd_lines.append(f" - loginctl enable-linger {config.username}")
|
|
1483
|
+
|
|
1484
|
+
# Generate autostart configurations based on installed apps (if enabled)
|
|
1485
|
+
autostart_apps = []
|
|
1486
|
+
|
|
1487
|
+
if getattr(config, 'autostart_apps', True):
|
|
1488
|
+
# Detect apps from snap_packages
|
|
1489
|
+
for snap_pkg in (config.snap_packages or []):
|
|
1490
|
+
if snap_pkg == "pycharm-community":
|
|
1491
|
+
autostart_apps.append({
|
|
1492
|
+
"name": "pycharm-community",
|
|
1493
|
+
"display_name": "PyCharm Community",
|
|
1494
|
+
"exec": "/snap/bin/pycharm-community %U",
|
|
1495
|
+
"type": "snap",
|
|
1496
|
+
"after": "graphical-session.target",
|
|
1497
|
+
})
|
|
1498
|
+
elif snap_pkg == "chromium":
|
|
1499
|
+
autostart_apps.append({
|
|
1500
|
+
"name": "chromium",
|
|
1501
|
+
"display_name": "Chromium Browser",
|
|
1502
|
+
"exec": "/snap/bin/chromium %U",
|
|
1503
|
+
"type": "snap",
|
|
1504
|
+
"after": "graphical-session.target",
|
|
1505
|
+
})
|
|
1506
|
+
elif snap_pkg == "firefox":
|
|
1507
|
+
autostart_apps.append({
|
|
1508
|
+
"name": "firefox",
|
|
1509
|
+
"display_name": "Firefox",
|
|
1510
|
+
"exec": "/snap/bin/firefox %U",
|
|
1511
|
+
"type": "snap",
|
|
1512
|
+
"after": "graphical-session.target",
|
|
1513
|
+
})
|
|
1514
|
+
elif snap_pkg == "code":
|
|
1515
|
+
autostart_apps.append({
|
|
1516
|
+
"name": "code",
|
|
1517
|
+
"display_name": "Visual Studio Code",
|
|
1518
|
+
"exec": "/snap/bin/code --new-window",
|
|
1519
|
+
"type": "snap",
|
|
1520
|
+
"after": "graphical-session.target",
|
|
1521
|
+
})
|
|
1522
|
+
|
|
1523
|
+
# Detect apps from packages (APT)
|
|
1524
|
+
for apt_pkg in (config.packages or []):
|
|
1525
|
+
if apt_pkg == "firefox":
|
|
1526
|
+
# Only add if not already added from snap
|
|
1527
|
+
if not any(a["name"] == "firefox" for a in autostart_apps):
|
|
1528
|
+
autostart_apps.append({
|
|
1529
|
+
"name": "firefox",
|
|
1530
|
+
"display_name": "Firefox",
|
|
1531
|
+
"exec": "/usr/bin/firefox %U",
|
|
1532
|
+
"type": "apt",
|
|
1533
|
+
"after": "graphical-session.target",
|
|
1534
|
+
})
|
|
1535
|
+
|
|
1536
|
+
# Check for google-chrome from app_data_paths
|
|
1537
|
+
for host_path, guest_path in (config.paths or {}).items():
|
|
1538
|
+
if guest_path == "/home/ubuntu/.config/google-chrome":
|
|
1539
|
+
autostart_apps.append({
|
|
1540
|
+
"name": "google-chrome",
|
|
1541
|
+
"display_name": "Google Chrome",
|
|
1542
|
+
"exec": "/usr/bin/google-chrome-stable %U",
|
|
1543
|
+
"type": "deb",
|
|
1544
|
+
"after": "graphical-session.target",
|
|
1545
|
+
})
|
|
1546
|
+
break
|
|
1547
|
+
|
|
1548
|
+
# Generate systemd user services for each app
|
|
1549
|
+
for app in autostart_apps:
|
|
1550
|
+
service_content = f'''[Unit]
|
|
1551
|
+
Description={app["display_name"]} Autostart
|
|
1552
|
+
After={app["after"]}
|
|
1553
|
+
|
|
1554
|
+
[Service]
|
|
1555
|
+
Type=simple
|
|
1556
|
+
Environment=DISPLAY=:0
|
|
1557
|
+
Environment=XDG_RUNTIME_DIR=/run/user/1000
|
|
1558
|
+
ExecStart={app["exec"]}
|
|
1559
|
+
Restart=on-failure
|
|
1560
|
+
RestartSec=5
|
|
1561
|
+
|
|
1562
|
+
[Install]
|
|
1563
|
+
WantedBy=default.target
|
|
1564
|
+
'''
|
|
1565
|
+
service_b64 = base64.b64encode(service_content.encode()).decode()
|
|
1566
|
+
service_path = f"/home/{config.username}/.config/systemd/user/{app['name']}.service"
|
|
1567
|
+
runcmd_lines.append(f" - echo '{service_b64}' | base64 -d > {service_path}")
|
|
1568
|
+
|
|
1569
|
+
# Generate desktop autostart files for GUI apps (alternative to systemd user services)
|
|
1570
|
+
for app in autostart_apps:
|
|
1571
|
+
desktop_content = f'''[Desktop Entry]
|
|
1572
|
+
Type=Application
|
|
1573
|
+
Name={app["display_name"]}
|
|
1574
|
+
Exec={app["exec"]}
|
|
1575
|
+
Hidden=false
|
|
1576
|
+
NoDisplay=false
|
|
1577
|
+
X-GNOME-Autostart-enabled=true
|
|
1578
|
+
X-GNOME-Autostart-Delay=5
|
|
1579
|
+
'''
|
|
1580
|
+
desktop_b64 = base64.b64encode(desktop_content.encode()).decode()
|
|
1581
|
+
desktop_path = f"/home/{config.username}/.config/autostart/{app['name']}.desktop"
|
|
1582
|
+
runcmd_lines.append(f" - echo '{desktop_b64}' | base64 -d > {desktop_path}")
|
|
1583
|
+
|
|
1584
|
+
# Fix ownership of all autostart files
|
|
1585
|
+
runcmd_lines.append(f" - chown -R 1000:1000 /home/{config.username}/.config/systemd")
|
|
1586
|
+
runcmd_lines.append(f" - chown -R 1000:1000 /home/{config.username}/.config/autostart")
|
|
1587
|
+
|
|
1588
|
+
# Enable systemd user services (must run as user)
|
|
1589
|
+
if autostart_apps:
|
|
1590
|
+
services_to_enable = " ".join(f"{app['name']}.service" for app in autostart_apps)
|
|
1591
|
+
runcmd_lines.append(f" - sudo -u {config.username} XDG_RUNTIME_DIR=/run/user/1000 systemctl --user daemon-reload || true")
|
|
1592
|
+
# Note: We don't enable services by default as desktop autostart is more reliable for GUI apps
|
|
1593
|
+
# User can enable them manually with: systemctl --user enable <service>
|
|
1594
|
+
|
|
1595
|
+
# === WEB SERVICES: System-wide services for uvicorn, nginx, etc. ===
|
|
1596
|
+
web_services = getattr(config, 'web_services', []) or []
|
|
1597
|
+
for svc in web_services:
|
|
1598
|
+
svc_name = svc.get("name", "clonebox-web")
|
|
1599
|
+
svc_desc = svc.get("description", f"CloneBox {svc_name}")
|
|
1600
|
+
svc_workdir = svc.get("workdir", "/mnt/project0")
|
|
1601
|
+
svc_exec = svc.get("exec", "uvicorn app:app --host 0.0.0.0 --port 8000")
|
|
1602
|
+
svc_user = svc.get("user", config.username)
|
|
1603
|
+
svc_after = svc.get("after", "network.target")
|
|
1604
|
+
svc_env = svc.get("environment", [])
|
|
1605
|
+
|
|
1606
|
+
env_lines = "\n".join(f"Environment={e}" for e in svc_env) if svc_env else ""
|
|
1607
|
+
|
|
1608
|
+
web_service_content = f'''[Unit]
|
|
1609
|
+
Description={svc_desc}
|
|
1610
|
+
After={svc_after}
|
|
1611
|
+
|
|
1612
|
+
[Service]
|
|
1613
|
+
Type=simple
|
|
1614
|
+
User={svc_user}
|
|
1615
|
+
WorkingDirectory={svc_workdir}
|
|
1616
|
+
{env_lines}
|
|
1617
|
+
ExecStart={svc_exec}
|
|
1618
|
+
Restart=always
|
|
1619
|
+
RestartSec=10
|
|
1620
|
+
|
|
1621
|
+
[Install]
|
|
1622
|
+
WantedBy=multi-user.target
|
|
1623
|
+
'''
|
|
1624
|
+
web_svc_b64 = base64.b64encode(web_service_content.encode()).decode()
|
|
1625
|
+
runcmd_lines.append(f" - echo '{web_svc_b64}' | base64 -d > /etc/systemd/system/{svc_name}.service")
|
|
1626
|
+
runcmd_lines.append(" - systemctl daemon-reload")
|
|
1627
|
+
runcmd_lines.append(f" - systemctl enable {svc_name}.service")
|
|
1628
|
+
runcmd_lines.append(f" - systemctl start {svc_name}.service || true")
|
|
1629
|
+
|
|
1630
|
+
# Create Python monitor service for continuous diagnostics
|
|
1631
|
+
monitor_script = f'''#!/usr/bin/env python3
|
|
1632
|
+
"""CloneBox Monitor - Continuous diagnostics and app restart service."""
|
|
1633
|
+
import subprocess
|
|
1634
|
+
import time
|
|
1635
|
+
import os
|
|
1636
|
+
import sys
|
|
1637
|
+
import json
|
|
1638
|
+
from pathlib import Path
|
|
1639
|
+
|
|
1640
|
+
REQUIRED_APPS = {json.dumps([app["name"] for app in autostart_apps])}
|
|
1641
|
+
CHECK_INTERVAL = 60 # seconds
|
|
1642
|
+
LOG_FILE = "/var/log/clonebox-monitor.log"
|
|
1643
|
+
STATUS_FILE = "/var/run/clonebox-monitor-status.json"
|
|
1644
|
+
|
|
1645
|
+
def log(msg):
|
|
1646
|
+
timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
|
|
1647
|
+
line = f"[{{timestamp}}] {{msg}}"
|
|
1648
|
+
print(line)
|
|
1649
|
+
try:
|
|
1650
|
+
with open(LOG_FILE, "a") as f:
|
|
1651
|
+
f.write(line + "\\n")
|
|
1652
|
+
except:
|
|
1653
|
+
pass
|
|
1654
|
+
|
|
1655
|
+
def get_running_processes():
|
|
1656
|
+
try:
|
|
1657
|
+
result = subprocess.run(["ps", "aux"], capture_output=True, text=True, timeout=10)
|
|
1658
|
+
return result.stdout
|
|
1659
|
+
except:
|
|
1660
|
+
return ""
|
|
1661
|
+
|
|
1662
|
+
def is_app_running(app_name, ps_output):
|
|
1663
|
+
patterns = {{
|
|
1664
|
+
"pycharm-community": ["pycharm", "idea"],
|
|
1665
|
+
"chromium": ["chromium"],
|
|
1666
|
+
"firefox": ["firefox", "firefox-esr"],
|
|
1667
|
+
"google-chrome": ["chrome", "google-chrome"],
|
|
1668
|
+
"code": ["code", "vscode"],
|
|
1669
|
+
}}
|
|
1670
|
+
for pattern in patterns.get(app_name, [app_name]):
|
|
1671
|
+
if pattern.lower() in ps_output.lower():
|
|
1672
|
+
return True
|
|
1673
|
+
return False
|
|
1674
|
+
|
|
1675
|
+
def restart_app(app_name):
|
|
1676
|
+
log(f"Restarting {{app_name}}...")
|
|
1677
|
+
try:
|
|
1678
|
+
subprocess.run(
|
|
1679
|
+
["sudo", "-u", "{config.username}", "systemctl", "--user", "restart", f"{{app_name}}.service"],
|
|
1680
|
+
timeout=30, capture_output=True
|
|
1681
|
+
)
|
|
1682
|
+
return True
|
|
1683
|
+
except Exception as e:
|
|
1684
|
+
log(f"Failed to restart {{app_name}}: {{e}}")
|
|
1685
|
+
return False
|
|
1686
|
+
|
|
1687
|
+
def check_mounts():
|
|
1688
|
+
try:
|
|
1689
|
+
with open("/etc/fstab", "r") as f:
|
|
1690
|
+
fstab = f.read()
|
|
1691
|
+
for line in fstab.split("\\n"):
|
|
1692
|
+
parts = line.split()
|
|
1693
|
+
if len(parts) >= 2 and parts[0].startswith("mount"):
|
|
1694
|
+
mp = parts[1]
|
|
1695
|
+
result = subprocess.run(["mountpoint", "-q", mp], capture_output=True)
|
|
1696
|
+
if result.returncode != 0:
|
|
1697
|
+
log(f"Mount {{mp}} not active, attempting remount...")
|
|
1698
|
+
subprocess.run(["mount", mp], capture_output=True)
|
|
1699
|
+
except Exception as e:
|
|
1700
|
+
log(f"Mount check failed: {{e}}")
|
|
1701
|
+
|
|
1702
|
+
def write_status(status):
|
|
1703
|
+
try:
|
|
1704
|
+
with open(STATUS_FILE, "w") as f:
|
|
1705
|
+
json.dump(status, f)
|
|
1706
|
+
except:
|
|
1707
|
+
pass
|
|
1708
|
+
|
|
1709
|
+
def main():
|
|
1710
|
+
log("CloneBox Monitor started")
|
|
1711
|
+
|
|
1712
|
+
while True:
|
|
1713
|
+
status = {{"timestamp": time.strftime("%Y-%m-%dT%H:%M:%S"), "apps": {{}}, "mounts_ok": True}}
|
|
1714
|
+
|
|
1715
|
+
# Check mounts
|
|
1716
|
+
check_mounts()
|
|
1717
|
+
|
|
1718
|
+
# Check apps (only if GUI session is active)
|
|
1719
|
+
if os.path.exists("/run/user/1000"):
|
|
1720
|
+
ps_output = get_running_processes()
|
|
1721
|
+
for app in REQUIRED_APPS:
|
|
1722
|
+
running = is_app_running(app, ps_output)
|
|
1723
|
+
status["apps"][app] = "running" if running else "stopped"
|
|
1724
|
+
# Don't auto-restart apps - user may have closed them intentionally
|
|
1725
|
+
|
|
1726
|
+
write_status(status)
|
|
1727
|
+
time.sleep(CHECK_INTERVAL)
|
|
1728
|
+
|
|
1729
|
+
if __name__ == "__main__":
|
|
1730
|
+
main()
|
|
1731
|
+
'''
|
|
1732
|
+
monitor_b64 = base64.b64encode(monitor_script.encode()).decode()
|
|
1733
|
+
runcmd_lines.append(f" - echo '{monitor_b64}' | base64 -d > /usr/local/bin/clonebox-monitor")
|
|
1734
|
+
runcmd_lines.append(" - chmod +x /usr/local/bin/clonebox-monitor")
|
|
1735
|
+
|
|
1736
|
+
# Create systemd service for the Python monitor
|
|
1737
|
+
monitor_service = '''[Unit]
|
|
1738
|
+
Description=CloneBox Monitor Service
|
|
1739
|
+
After=network.target graphical.target
|
|
1740
|
+
|
|
1741
|
+
[Service]
|
|
1742
|
+
Type=simple
|
|
1743
|
+
ExecStart=/usr/bin/python3 /usr/local/bin/clonebox-monitor
|
|
1744
|
+
Restart=always
|
|
1745
|
+
RestartSec=30
|
|
1746
|
+
|
|
1747
|
+
[Install]
|
|
1748
|
+
WantedBy=multi-user.target'''
|
|
1749
|
+
monitor_svc_b64 = base64.b64encode(monitor_service.encode()).decode()
|
|
1750
|
+
runcmd_lines.append(f" - echo '{monitor_svc_b64}' | base64 -d > /etc/systemd/system/clonebox-monitor.service")
|
|
1751
|
+
runcmd_lines.append(" - systemctl daemon-reload")
|
|
1752
|
+
runcmd_lines.append(" - systemctl enable clonebox-monitor.service")
|
|
1753
|
+
runcmd_lines.append(" - systemctl start clonebox-monitor.service || true")
|
|
1754
|
+
|
|
762
1755
|
# Add reboot command at the end if GUI is enabled
|
|
763
1756
|
if config.gui:
|
|
764
1757
|
runcmd_lines.append(" - echo 'Rebooting in 10 seconds to start GUI...'")
|