clonebox-0.1.25-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clonebox/cloner.py ADDED
@@ -0,0 +1,2081 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ SelectiveVMCloner - Creates isolated VMs with only selected apps/paths/services.
4
+ """
5
+
6
+ import base64
+ import json
7
+ import os
8
+ import subprocess
9
+ import tempfile
10
+ import urllib.request
11
+ import uuid
12
+ import xml.etree.ElementTree as ET
13
+ from dataclasses import dataclass, field
14
+ from pathlib import Path
15
+ from typing import Optional
16
+
17
+ try:
18
+ from dotenv import load_dotenv
19
+ load_dotenv()
20
+ except ImportError:
21
+ pass # dotenv is optional
22
+
23
+ try:
24
+ import libvirt
25
+ except ImportError:
26
+ libvirt = None
27
+
28
+ SNAP_INTERFACES = {
29
+ 'pycharm-community': ['desktop', 'desktop-legacy', 'x11', 'wayland', 'home', 'network', 'network-bind', 'cups-control', 'removable-media'],
30
+ 'chromium': ['desktop', 'desktop-legacy', 'x11', 'wayland', 'home', 'network', 'audio-playback', 'camera'],
31
+ 'firefox': ['desktop', 'desktop-legacy', 'x11', 'wayland', 'home', 'network', 'audio-playback', 'removable-media'],
32
+ 'code': ['desktop', 'desktop-legacy', 'x11', 'wayland', 'home', 'network', 'ssh-keys'],
33
+ 'slack': ['desktop', 'desktop-legacy', 'x11', 'wayland', 'home', 'network', 'audio-playback'],
34
+ 'spotify': ['desktop', 'x11', 'wayland', 'home', 'network', 'audio-playback'],
35
+ }
36
+ DEFAULT_SNAP_INTERFACES = ['desktop', 'desktop-legacy', 'x11', 'home', 'network']
37
+
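+ # Illustrative: both cloud-init and the boot diagnostic emit one "snap connect"
+ # per interface listed above (falling back to DEFAULT_SNAP_INTERFACES), e.g. for
+ # firefox:
+ #
+ #     snap connect firefox:desktop :desktop
+ #     snap connect firefox:home :home
+ #     snap connect firefox:audio-playback :audio-playback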
38
+
39
+ @dataclass
40
+ class VMConfig:
41
+ """Configuration for the VM to create."""
42
+
43
+ name: str = field(default_factory=lambda: os.getenv("VM_NAME", "clonebox-vm"))
44
+ ram_mb: int = field(default_factory=lambda: int(os.getenv("VM_RAM_MB", "8192")))
45
+ vcpus: int = field(default_factory=lambda: int(os.getenv("VM_VCPUS", "4")))
46
+ disk_size_gb: int = field(default_factory=lambda: int(os.getenv("VM_DISK_SIZE_GB", "20")))
47
+ gui: bool = field(default_factory=lambda: os.getenv("VM_GUI", "true").lower() == "true")
48
+ base_image: Optional[str] = field(default_factory=lambda: os.getenv("VM_BASE_IMAGE") or None)
49
+ paths: dict = field(default_factory=dict)
50
+ packages: list = field(default_factory=list)
51
+ snap_packages: list = field(default_factory=list) # Snap packages to install
52
+ services: list = field(default_factory=list)
53
+ post_commands: list = field(default_factory=list) # Commands to run after setup
54
+ user_session: bool = field(default_factory=lambda: os.getenv("VM_USER_SESSION", "false").lower() == "true") # Use qemu:///session instead of qemu:///system
55
+ network_mode: str = field(default_factory=lambda: os.getenv("VM_NETWORK_MODE", "auto")) # auto|default|user
56
+ username: str = field(default_factory=lambda: os.getenv("VM_USERNAME", "ubuntu")) # VM default username
57
+ password: str = field(default_factory=lambda: os.getenv("VM_PASSWORD", "ubuntu")) # VM default password
58
+ autostart_apps: bool = field(default_factory=lambda: os.getenv("VM_AUTOSTART_APPS", "true").lower() == "true") # Auto-start GUI apps after login (desktop autostart)
59
+ web_services: list = field(default_factory=list) # Web services to start (uvicorn, etc.)
60
+
61
+ def to_dict(self) -> dict:
62
+ return {
63
+ "paths": self.paths,
64
+ "packages": self.packages,
65
+ "services": self.services,
66
+ }
67
+
68
+
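+ # Illustrative usage sketch (not part of the released code): a VMConfig is
+ # normally assembled by the CLI layer and handed to SelectiveVMCloner below.
+ # The name and paths here are assumptions for the example only:
+ #
+ #     config = VMConfig(
+ #         name="dev-box",
+ #         paths={"/home/user/project": "/home/ubuntu/project"},
+ #         packages=["git", "docker.io"],
+ #         snap_packages=["pycharm-community"],
+ #     )
+ #     cloner = SelectiveVMCloner(user_session=True)
+ #     vm_uuid = cloner.create_vm(config)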
69
+ class SelectiveVMCloner:
70
+ """
71
+ Creates VMs with only selected applications, paths and services.
72
+ Uses bind mounts instead of full disk cloning.
73
+ """
74
+
75
+ def __init__(self, conn_uri: Optional[str] = None, user_session: bool = False):
76
+ self.user_session = user_session
77
+ if conn_uri:
78
+ self.conn_uri = conn_uri
79
+ else:
80
+ self.conn_uri = "qemu:///session" if user_session else "qemu:///system"
81
+ self.conn = None
82
+ self._connect()
83
+
84
+ @property
85
+ def SYSTEM_IMAGES_DIR(self) -> Path:
86
+ return Path(os.getenv("CLONEBOX_SYSTEM_IMAGES_DIR", "/var/lib/libvirt/images"))
87
+
88
+ @property
89
+ def USER_IMAGES_DIR(self) -> Path:
90
+ return Path(os.getenv("CLONEBOX_USER_IMAGES_DIR", str(Path.home() / ".local/share/libvirt/images"))).expanduser()
91
+
92
+ @property
93
+ def DEFAULT_BASE_IMAGE_URL(self) -> str:
94
+ return os.getenv(
95
+ "CLONEBOX_BASE_IMAGE_URL",
96
+ "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
97
+ )
98
+
99
+ @property
100
+ def DEFAULT_BASE_IMAGE_FILENAME(self) -> str:
101
+ return os.getenv(
102
+ "CLONEBOX_BASE_IMAGE_FILENAME",
103
+ "clonebox-ubuntu-jammy-amd64.qcow2"
104
+ )
105
+
106
+ def _connect(self):
107
+ """Connect to libvirt."""
108
+ if libvirt is None:
109
+ raise ImportError(
110
+ "libvirt-python is required. Install with: pip install libvirt-python\n"
111
+ "Also ensure libvirt is installed: sudo apt install libvirt-daemon-system"
112
+ )
113
+
114
+ try:
115
+ self.conn = libvirt.open(self.conn_uri)
116
+ except libvirt.libvirtError as e:
117
+ raise ConnectionError(
118
+ f"Cannot connect to {self.conn_uri}\n"
119
+ f"Error: {e}\n\n"
120
+ f"Troubleshooting:\n"
121
+ f" 1. Check if libvirtd is running: sudo systemctl status libvirtd\n"
122
+ f" 2. Start libvirtd: sudo systemctl start libvirtd\n"
123
+ f" 3. Add user to libvirt group: sudo usermod -aG libvirt $USER\n"
124
+ f" 4. Re-login or run: newgrp libvirt\n"
125
+ f" 5. For user session (no sudo): use --user flag"
126
+ )
127
+
128
+ if self.conn is None:
129
+ raise ConnectionError(f"Cannot connect to {self.conn_uri}")
130
+
131
+ def get_images_dir(self) -> Path:
132
+ """Get the appropriate images directory based on session type."""
133
+ if self.user_session:
134
+ return self.USER_IMAGES_DIR
135
+ return self.SYSTEM_IMAGES_DIR
136
+
137
+ def _get_downloads_dir(self) -> Path:
138
+ return Path.home() / "Downloads"
139
+
140
+ def _ensure_default_base_image(self, console=None) -> Path:
141
+ def log(msg):
142
+ if console:
143
+ console.print(msg)
144
+ else:
145
+ print(msg)
146
+
147
+ downloads_dir = self._get_downloads_dir()
148
+ downloads_dir.mkdir(parents=True, exist_ok=True)
149
+ cached_path = downloads_dir / self.DEFAULT_BASE_IMAGE_FILENAME
150
+
151
+ if cached_path.exists() and cached_path.stat().st_size > 0:
152
+ return cached_path
153
+
154
+ log(
155
+ "[cyan]⬇️ Downloading base image (first run only). This will be cached in ~/Downloads...[/]"
156
+ )
157
+
158
+ try:
159
+ with tempfile.NamedTemporaryFile(
160
+ prefix=f"{self.DEFAULT_BASE_IMAGE_FILENAME}.",
161
+ dir=str(downloads_dir),
162
+ delete=False,
163
+ ) as tmp:
164
+ tmp_path = Path(tmp.name)
165
+
166
+ try:
167
+ urllib.request.urlretrieve(self.DEFAULT_BASE_IMAGE_URL, tmp_path)
168
+ tmp_path.replace(cached_path)
169
+ finally:
170
+ if tmp_path.exists() and tmp_path != cached_path:
171
+ try:
172
+ tmp_path.unlink()
173
+ except Exception:
174
+ pass
175
+ except Exception as e:
176
+ raise RuntimeError(
177
+ "Failed to download a default base image.\n\n"
178
+ "🔧 Solutions:\n"
179
+ " 1. Provide a base image explicitly:\n"
180
+ " clonebox clone . --base-image /path/to/image.qcow2\n"
181
+ " 2. Download it manually and reuse it:\n"
182
+ f" wget -O {cached_path} {self.DEFAULT_BASE_IMAGE_URL}\n\n"
183
+ f"Original error: {e}"
184
+ ) from e
185
+
186
+ return cached_path
187
+
188
+ def _default_network_active(self) -> bool:
189
+ """Check if libvirt default network is active."""
190
+ try:
191
+ net = self.conn.networkLookupByName("default")
192
+ return net.isActive() == 1
193
+ except Exception:
194
+ return False
195
+
196
+ def resolve_network_mode(self, config: VMConfig) -> str:
197
+ """Resolve network mode based on config and session type."""
198
+ mode = (config.network_mode or "auto").lower()
199
+ if mode == "auto":
200
+ if self.user_session and not self._default_network_active():
201
+ return "user"
202
+ return "default"
203
+ if mode in {"default", "user"}:
204
+ return mode
205
+ return "default"
206
+
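+ # Resolution examples (illustrative): network_mode="auto" in a user session with
+ # no active "default" libvirt network resolves to "user" (slirp networking);
+ # "auto" otherwise resolves to "default"; explicit "default"/"user" are honored,
+ # and any other value falls back to "default".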
207
+ def check_prerequisites(self) -> dict:
208
+ """Check system prerequisites for VM creation."""
209
+ images_dir = self.get_images_dir()
210
+
211
+ checks = {
212
+ "libvirt_connected": False,
213
+ "kvm_available": False,
214
+ "default_network": False,
215
+ "images_dir_writable": False,
216
+ "images_dir": str(images_dir),
217
+ "session_type": "user" if self.user_session else "system",
218
+ }
219
+
220
+ # Check libvirt connection
221
+ if self.conn and self.conn.isAlive():
222
+ checks["libvirt_connected"] = True
223
+
224
+ # Check KVM
225
+ kvm_path = Path("/dev/kvm")
226
+ checks["kvm_available"] = kvm_path.exists()
227
+ if not checks["kvm_available"]:
228
+ checks["kvm_error"] = "KVM not available. Enable virtualization in BIOS."
229
+ elif not os.access(kvm_path, os.R_OK | os.W_OK):
230
+ checks["kvm_error"] = (
231
+ "No access to /dev/kvm. Add user to kvm group: sudo usermod -aG kvm $USER"
232
+ )
233
+
234
+ # Check default network
235
+ try:
236
+ net = self.conn.networkLookupByName("default")
237
+ checks["default_network"] = net.isActive() == 1
238
+ except libvirt.libvirtError:
239
+ checks["network_error"] = (
240
+ "Default network not found or inactive.\n"
241
+ " For user session, CloneBox can use user-mode networking (slirp) automatically.\n"
242
+ " Or create a user network:\n"
243
+ " virsh --connect qemu:///session net-define /tmp/default-network.xml\n"
244
+ " virsh --connect qemu:///session net-start default\n"
245
+ " Or use system session: clonebox clone . (without --user)\n"
246
+ )
247
+
248
+ # Check images directory
249
+ if images_dir.exists():
250
+ checks["images_dir_writable"] = os.access(images_dir, os.W_OK)
251
+ if not checks["images_dir_writable"]:
252
+ checks["images_dir_error"] = (
253
+ f"Cannot write to {images_dir}\n"
254
+ f" Option 1: Run with sudo\n"
255
+ f" Option 2: Use --user flag for user session (no root needed)\n"
256
+ f" Option 3: Fix permissions: sudo chown -R $USER:libvirt {images_dir}"
257
+ )
258
+ else:
259
+ # Try to create it
260
+ try:
261
+ images_dir.mkdir(parents=True, exist_ok=True)
262
+ checks["images_dir_writable"] = True
263
+ except PermissionError:
264
+ checks["images_dir_writable"] = False
265
+ checks["images_dir_error"] = (
266
+ f"Cannot create {images_dir}\n"
267
+ f" Use --user flag for user session (stores in ~/.local/share/libvirt/images/)"
268
+ )
269
+
270
+ return checks
271
+
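+ # Example of the returned mapping on a healthy system-session host (illustrative):
+ #
+ #     {"libvirt_connected": True, "kvm_available": True, "default_network": True,
+ #      "images_dir_writable": True, "images_dir": "/var/lib/libvirt/images",
+ #      "session_type": "system"}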
272
+ def create_vm(self, config: VMConfig, console=None, replace: bool = False) -> str:
273
+ """
274
+ Create a VM with only selected applications/paths.
275
+
276
+ Args:
277
+ config: VMConfig with paths, packages, services
278
+ console: Rich console for output (optional)
+ replace: If True, delete and recreate an existing VM with the same name
279
+
280
+ Returns:
281
+ UUID of created VM
282
+ """
283
+
284
+ def log(msg):
285
+ if console:
286
+ console.print(msg)
287
+ else:
288
+ print(msg)
289
+
290
+ # If VM already exists, optionally replace it
291
+ existing_vm = None
292
+ try:
293
+ candidate_vm = self.conn.lookupByName(config.name)
294
+ if candidate_vm is not None:
295
+ # libvirt returns a domain object whose .name() should match the requested name.
296
+ # In tests, an unconfigured MagicMock may be returned here; avoid treating that as
297
+ # a real existing domain unless we can confirm the name matches.
298
+ try:
299
+ if hasattr(candidate_vm, "name") and callable(candidate_vm.name):
300
+ if candidate_vm.name() == config.name:
301
+ existing_vm = candidate_vm
302
+ else:
303
+ existing_vm = candidate_vm
304
+ except Exception:
305
+ existing_vm = candidate_vm
306
+ except Exception:
307
+ existing_vm = None
308
+
309
+ if existing_vm is not None:
310
+ if not replace:
311
+ raise RuntimeError(
312
+ f"VM '{config.name}' already exists.\n\n"
313
+ f"🔧 Solutions:\n"
314
+ f" 1. Reuse existing VM: clonebox start {config.name}\n"
315
+ f" 2. Replace it: clonebox clone . --name {config.name} --replace\n"
316
+ f" 3. Delete it: clonebox delete {config.name}\n"
317
+ )
318
+
319
+ log(f"[yellow]⚠️ VM '{config.name}' already exists - replacing...[/]")
320
+ self.delete_vm(config.name, delete_storage=True, console=console, ignore_not_found=True)
321
+
322
+ # Determine images directory
323
+ images_dir = self.get_images_dir()
324
+ vm_dir = images_dir / config.name
325
+
326
+ try:
327
+ vm_dir.mkdir(parents=True, exist_ok=True)
328
+ except PermissionError as e:
329
+ raise PermissionError(
330
+ f"Cannot create VM directory: {vm_dir}\n\n"
331
+ f"🔧 Solutions:\n"
332
+ f" 1. Use --user flag to run in user session (recommended):\n"
333
+ f" clonebox clone . --user\n\n"
334
+ f" 2. Run with sudo (not recommended):\n"
335
+ f" sudo clonebox clone .\n\n"
336
+ f" 3. Fix directory permissions:\n"
337
+ f" sudo mkdir -p {images_dir}\n"
338
+ f" sudo chown -R $USER:libvirt {images_dir}\n\n"
339
+ f"Original error: {e}"
340
+ ) from e
341
+
342
+ # Create root disk
343
+ root_disk = vm_dir / "root.qcow2"
344
+
345
+ if not config.base_image:
346
+ config.base_image = str(self._ensure_default_base_image(console=console))
347
+
348
+ if config.base_image and Path(config.base_image).exists():
349
+ # Use backing file for faster creation
350
+ log(f"[cyan]📀 Creating disk with backing file: {config.base_image}[/]")
351
+ cmd = [
352
+ "qemu-img",
353
+ "create",
354
+ "-f",
355
+ "qcow2",
356
+ "-b",
357
+ config.base_image,
358
+ "-F",
359
+ "qcow2",
360
+ str(root_disk),
361
+ f"{config.disk_size_gb}G",
362
+ ]
363
+ else:
364
+ # Create empty disk
365
+ log(f"[cyan]📀 Creating empty {config.disk_size_gb}GB disk...[/]")
366
+ cmd = ["qemu-img", "create", "-f", "qcow2", str(root_disk), f"{config.disk_size_gb}G"]
367
+
368
+ subprocess.run(cmd, check=True, capture_output=True)
369
+
370
+ # Create cloud-init ISO if packages/services specified
371
+ cloudinit_iso = None
372
+ if config.packages or config.services:
373
+ cloudinit_iso = self._create_cloudinit_iso(vm_dir, config)
374
+ log(f"[cyan]☁️ Created cloud-init ISO with {len(config.packages)} packages[/]")
375
+
376
+ # Resolve network mode
377
+ network_mode = self.resolve_network_mode(config)
378
+ if network_mode == "user":
379
+ log(
380
+ "[yellow]⚠️ Using user-mode networking (slirp) because default libvirt network is unavailable[/]"
381
+ )
382
+ else:
383
+ log(f"[dim]Network mode: {network_mode}[/]")
384
+
385
+ # Generate VM XML
386
+ vm_xml = self._generate_vm_xml(config, root_disk, cloudinit_iso)
387
+
388
+ # Define and create VM
389
+ log(f"[cyan]🔧 Defining VM '{config.name}'...[/]")
390
+ try:
391
+ vm = self.conn.defineXML(vm_xml)
392
+ except Exception as e:
393
+ raise RuntimeError(
394
+ f"Failed to define VM '{config.name}'.\n"
395
+ f"Error: {e}\n\n"
396
+ f"If the VM already exists, try: clonebox clone . --name {config.name} --replace\n"
397
+ ) from e
398
+
399
+ log(f"[green]✅ VM '{config.name}' created successfully![/]")
400
+ log(f"[dim] UUID: {vm.UUIDString()}[/]")
401
+
402
+ return vm.UUIDString()
403
+
404
+ def _generate_vm_xml(
405
+ self, config: VMConfig, root_disk: Path, cloudinit_iso: Optional[Path]
406
+ ) -> str:
407
+ """Generate libvirt XML for the VM."""
408
+
409
+ root = ET.Element("domain", type="kvm")
410
+
411
+ # Basic metadata
412
+ ET.SubElement(root, "name").text = config.name
413
+ ET.SubElement(root, "uuid").text = str(uuid.uuid4())
414
+ ET.SubElement(root, "memory", unit="MiB").text = str(config.ram_mb)
415
+ ET.SubElement(root, "currentMemory", unit="MiB").text = str(config.ram_mb)
416
+ ET.SubElement(root, "vcpu", placement="static").text = str(config.vcpus)
417
+
418
+ # OS configuration
419
+ os_elem = ET.SubElement(root, "os")
420
+ ET.SubElement(os_elem, "type", arch="x86_64", machine="q35").text = "hvm"
421
+ ET.SubElement(os_elem, "boot", dev="hd")
422
+
423
+ # Features
424
+ features = ET.SubElement(root, "features")
425
+ ET.SubElement(features, "acpi")
426
+ ET.SubElement(features, "apic")
427
+
428
+ # CPU
429
+ ET.SubElement(root, "cpu", mode="host-passthrough", check="none")
430
+
431
+ # Devices
432
+ devices = ET.SubElement(root, "devices")
433
+
434
+ # Emulator
435
+ ET.SubElement(devices, "emulator").text = "/usr/bin/qemu-system-x86_64"
436
+
437
+ # Root disk
438
+ disk = ET.SubElement(devices, "disk", type="file", device="disk")
439
+ ET.SubElement(disk, "driver", name="qemu", type="qcow2", cache="writeback")
440
+ ET.SubElement(disk, "source", file=str(root_disk))
441
+ ET.SubElement(disk, "target", dev="vda", bus="virtio")
442
+
443
+ # Cloud-init ISO
444
+ if cloudinit_iso:
445
+ cdrom = ET.SubElement(devices, "disk", type="file", device="cdrom")
446
+ ET.SubElement(cdrom, "driver", name="qemu", type="raw")
447
+ ET.SubElement(cdrom, "source", file=str(cloudinit_iso))
448
+ ET.SubElement(cdrom, "target", dev="sda", bus="sata")
449
+ ET.SubElement(cdrom, "readonly")
450
+
451
+ # 9p filesystem mounts (bind mounts from host)
452
+ # Use accessmode="mapped" to allow VM user to access host files regardless of UID
453
+ for idx, (host_path, guest_tag) in enumerate(config.paths.items()):
454
+ if Path(host_path).exists():
455
+ fs = ET.SubElement(devices, "filesystem", type="mount", accessmode="mapped")
456
+ ET.SubElement(fs, "driver", type="path", wrpolicy="immediate")
457
+ ET.SubElement(fs, "source", dir=host_path)
458
+ # Use simple tag names for 9p mounts
459
+ tag = f"mount{idx}"
460
+ ET.SubElement(fs, "target", dir=tag)
461
+
462
+ # Network interface
463
+ network_mode = self.resolve_network_mode(config)
464
+ if network_mode == "user":
465
+ iface = ET.SubElement(devices, "interface", type="user")
466
+ ET.SubElement(iface, "model", type="virtio")
467
+ else:
468
+ iface = ET.SubElement(devices, "interface", type="network")
469
+ ET.SubElement(iface, "source", network="default")
470
+ ET.SubElement(iface, "model", type="virtio")
471
+
472
+ # Serial console
473
+ serial = ET.SubElement(devices, "serial", type="pty")
474
+ ET.SubElement(serial, "target", port="0")
475
+
476
+ console_elem = ET.SubElement(devices, "console", type="pty")
477
+ ET.SubElement(console_elem, "target", type="serial", port="0")
478
+
479
+ # Graphics (SPICE)
480
+ if config.gui:
481
+ graphics = ET.SubElement(
482
+ devices, "graphics", type="spice", autoport="yes", listen="127.0.0.1"
483
+ )
484
+ ET.SubElement(graphics, "listen", type="address", address="127.0.0.1")
485
+
486
+ # Video
487
+ video = ET.SubElement(devices, "video")
488
+ ET.SubElement(video, "model", type="virtio", heads="1", primary="yes")
489
+
490
+ # Input devices
491
+ ET.SubElement(devices, "input", type="tablet", bus="usb")
492
+ ET.SubElement(devices, "input", type="keyboard", bus="usb")
493
+
494
+ # Channel for guest agent
495
+ channel = ET.SubElement(devices, "channel", type="unix")
496
+ ET.SubElement(channel, "target", type="virtio", name="org.qemu.guest_agent.0")
497
+
498
+ # Memory balloon
499
+ memballoon = ET.SubElement(devices, "memballoon", model="virtio")
500
+ ET.SubElement(
501
+ memballoon,
502
+ "address",
503
+ type="pci",
504
+ domain="0x0000",
505
+ bus="0x00",
506
+ slot="0x08",
507
+ function="0x0",
508
+ )
509
+
510
+ return ET.tostring(root, encoding="unicode")
511
+
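+ # For reference, each shared host path ends up in the domain XML roughly as
+ # (illustrative, attribute order may differ):
+ #
+ #     <filesystem type="mount" accessmode="mapped">
+ #       <driver type="path" wrpolicy="immediate"/>
+ #       <source dir="/home/user/project"/>
+ #       <target dir="mount0"/>
+ #     </filesystem>
+ #
+ # The guest side mounts the tag via 9p (see _create_cloudinit_iso), e.g.:
+ #     mount -t 9p -o trans=virtio,version=9p2000.L mount0 /home/ubuntu/project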
512
+ def _generate_boot_diagnostic_script(self, config: VMConfig) -> str:
513
+ """Generate boot diagnostic script with self-healing capabilities."""
514
+ import base64
515
+
516
+ wants_google_chrome = any(
517
+ p == "/home/ubuntu/.config/google-chrome" for p in (config.paths or {}).values()
518
+ )
519
+
520
+ apt_pkg_list = list(config.packages or [])
521
+ for base_pkg in ["qemu-guest-agent", "cloud-guest-utils"]:
522
+ if base_pkg not in apt_pkg_list:
523
+ apt_pkg_list.insert(0, base_pkg)
524
+ if config.gui:
525
+ for gui_pkg in ["ubuntu-desktop-minimal", "firefox"]:
526
+ if gui_pkg not in apt_pkg_list:
527
+ apt_pkg_list.append(gui_pkg)
528
+
529
+ apt_packages = " ".join(f'"{p}"' for p in apt_pkg_list) if apt_pkg_list else ""
530
+ snap_packages = " ".join(f'"{p}"' for p in config.snap_packages) if config.snap_packages else ""
531
+ services = " ".join(f'"{s}"' for s in config.services) if config.services else ""
532
+
533
+ snap_ifaces_bash = "\n".join(
534
+ f'SNAP_INTERFACES["{snap}"]="{" ".join(ifaces)}"'
535
+ for snap, ifaces in SNAP_INTERFACES.items()
536
+ )
537
+
538
+ script = f'''#!/bin/bash
539
+ set -uo pipefail
540
+ LOG="/var/log/clonebox-boot.log"
541
+ STATUS_KV="/var/run/clonebox-status"
542
+ STATUS_JSON="/var/run/clonebox-status.json"
543
+ MAX_RETRIES=3
544
+ PASSED=0 FAILED=0 REPAIRED=0 TOTAL=0
545
+
546
+ RED='\\033[0;31m' GREEN='\\033[0;32m' YELLOW='\\033[1;33m' CYAN='\\033[0;36m' NC='\\033[0m' BOLD='\\033[1m'
547
+
548
+ log() {{ echo -e "[$(date +%H:%M:%S)] $1" | tee -a "$LOG"; }}
549
+ ok() {{ log "${{GREEN}}✅ $1${{NC}}"; ((PASSED++)); ((TOTAL++)); }}
550
+ fail() {{ log "${{RED}}❌ $1${{NC}}"; ((FAILED++)); ((TOTAL++)); }}
551
+ repair() {{ log "${{YELLOW}}🔧 $1${{NC}}"; }}
552
+ section() {{ log ""; log "${{BOLD}}[$1] $2${{NC}}"; }}
553
+
554
+ write_status() {{
555
+ local phase="$1"
556
+ local current_task="${{2:-}}"
557
+ printf 'passed=%s failed=%s repaired=%s\n' "$PASSED" "$FAILED" "$REPAIRED" > "$STATUS_KV" 2>/dev/null || true
558
+ cat > "$STATUS_JSON" <<EOF
559
+ {{"phase":"$phase","current_task":"$current_task","total":$TOTAL,"passed":$PASSED,"failed":$FAILED,"repaired":$REPAIRED,"timestamp":"$(date -Iseconds)"}}
560
+ EOF
561
+ }}
562
+
563
+ header() {{
564
+ log ""
565
+ log "${{BOLD}}${{CYAN}}═══════════════════════════════════════════════════════════${{NC}}"
566
+ log "${{BOLD}}${{CYAN}} $1${{NC}}"
567
+ log "${{BOLD}}${{CYAN}}═══════════════════════════════════════════════════════════${{NC}}"
568
+ }}
569
+
570
+ declare -A SNAP_INTERFACES
571
+ {snap_ifaces_bash}
572
+ DEFAULT_IFACES="desktop desktop-legacy x11 home network"
573
+
574
+ check_apt() {{
575
+ dpkg -l "$1" 2>/dev/null | grep -q "^ii"
576
+ }}
577
+
578
+ install_apt() {{
579
+ for i in $(seq 1 $MAX_RETRIES); do
580
+ DEBIAN_FRONTEND=noninteractive apt-get install -y "$1" &>>"$LOG" && return 0
581
+ sleep 3
582
+ done
583
+ return 1
584
+ }}
585
+
586
+ check_snap() {{
587
+ snap list "$1" &>/dev/null
588
+ }}
589
+
590
+ install_snap() {{
591
+ timeout 60 snap wait system seed.loaded 2>/dev/null || true
592
+ for i in $(seq 1 $MAX_RETRIES); do
593
+ snap install "$1" --classic &>>"$LOG" && return 0
594
+ snap install "$1" &>>"$LOG" && return 0
595
+ sleep 5
596
+ done
597
+ return 1
598
+ }}
599
+
600
+ connect_interfaces() {{
601
+ local snap="$1"
602
+ local ifaces="${{SNAP_INTERFACES[$snap]:-$DEFAULT_IFACES}}"
603
+ for iface in $ifaces; do
604
+ snap connect "$snap:$iface" ":$iface" 2>/dev/null && log " ${{GREEN}}✓${{NC}} $snap:$iface" || true
605
+ done
606
+ }}
607
+
608
+ test_launch() {{
609
+ local app="$1"
610
+ local temp_output="/tmp/$app-test.log"
611
+ local error_detail="/tmp/$app-error.log"
612
+
613
+ case "$app" in
614
+ pycharm-community)
615
+ if timeout 10 /snap/pycharm-community/current/jbr/bin/java -version &>"$temp_output"; then
616
+ return 0
617
+ else
618
+ echo "PyCharm Java test failed:" >> "$error_detail"
619
+ cat "$temp_output" >> "$error_detail" 2>&1 || true
620
+ return 1
621
+ fi
622
+ ;;
623
+ chromium)
624
+ # First check if chromium can run at all
625
+ if ! command -v chromium >/dev/null 2>&1; then
626
+ echo "ERROR: chromium not found in PATH" >> "$error_detail"
627
+ echo "PATH=$PATH" >> "$error_detail"
628
+ return 1
629
+ fi
630
+
631
+ # Try with different approaches
632
+ if timeout 10 chromium --headless=new --dump-dom about:blank &>"$temp_output" 2>&1; then
633
+ return 0
634
+ else
635
+ echo "Chromium headless test failed:" >> "$error_detail"
636
+ cat "$temp_output" >> "$error_detail"
637
+
638
+ # Try basic version check
639
+ echo "Trying chromium --version:" >> "$error_detail"
640
+ timeout 5 chromium --version >> "$error_detail" 2>&1 || true
641
+
642
+ # Check display
643
+ echo "Display check:" >> "$error_detail"
644
+ echo "DISPLAY=${{DISPLAY:-unset}}" >> "$error_detail"
645
+ echo "XDG_RUNTIME_DIR=${{XDG_RUNTIME_DIR:-unset}}" >> "$error_detail"
646
+ ls -la /tmp/.X11-unix/ >> "$error_detail" 2>&1 || true
647
+
648
+ return 1
649
+ fi
650
+ ;;
651
+ firefox)
652
+ if timeout 10 firefox --headless --screenshot /tmp/ff-test.png about:blank &>/dev/null; then
653
+ rm -f /tmp/ff-test.png
654
+ return 0
655
+ else
656
+ echo "Firefox headless test failed" >> "$error_detail"
657
+ timeout 5 firefox --version >> "$error_detail" 2>&1 || true
658
+ return 1
659
+ fi
660
+ ;;
661
+ docker)
662
+ if docker info &>/dev/null; then
663
+ return 0
664
+ else
665
+ echo "Docker info failed:" >> "$error_detail"
666
+ docker info >> "$error_detail" 2>&1 || true
667
+ return 1
668
+ fi
669
+ ;;
670
+ *)
671
+ if command -v "$1" &>/dev/null; then
672
+ return 0
673
+ else
674
+ echo "Command not found: $1" >> "$error_detail"
675
+ echo "PATH=$PATH" >> "$error_detail"
676
+ return 1
677
+ fi
678
+ ;;
679
+ esac
680
+ }}
681
+
682
+ header "CloneBox VM Boot Diagnostic"
683
+ write_status "starting" "boot diagnostic starting"
684
+
685
+ APT_PACKAGES=({apt_packages})
686
+ SNAP_PACKAGES=({snap_packages})
687
+ SERVICES=({services})
688
+ VM_USER="${{SUDO_USER:-ubuntu}}"
689
+ VM_HOME="/home/$VM_USER"
690
+
691
+ # ═══════════════════════════════════════════════════════════════════════════════
692
+ # Section 0: Fix permissions for GNOME directories (runs first!)
693
+ # ═══════════════════════════════════════════════════════════════════════════════
694
+ section "0/7" "Fixing directory permissions..."
695
+ write_status "fixing_permissions" "fixing directory permissions"
696
+
697
+ GNOME_DIRS=(
698
+ "$VM_HOME/.config"
699
+ "$VM_HOME/.config/pulse"
700
+ "$VM_HOME/.config/dconf"
701
+ "$VM_HOME/.config/ibus"
702
+ "$VM_HOME/.cache"
703
+ "$VM_HOME/.cache/ibus"
704
+ "$VM_HOME/.cache/tracker3"
705
+ "$VM_HOME/.cache/mesa_shader_cache"
706
+ "$VM_HOME/.local"
707
+ "$VM_HOME/.local/share"
708
+ "$VM_HOME/.local/share/applications"
709
+ "$VM_HOME/.local/share/keyrings"
710
+ )
711
+
712
+ for dir in "${{GNOME_DIRS[@]}}"; do
713
+ if [ ! -d "$dir" ]; then
714
+ mkdir -p "$dir" 2>/dev/null && log " Created $dir" || true
715
+ fi
716
+ done
717
+
718
+ # Fix ownership for all critical directories
719
+ chown -R 1000:1000 "$VM_HOME/.config" "$VM_HOME/.cache" "$VM_HOME/.local" 2>/dev/null || true
720
+ chmod 700 "$VM_HOME/.config" "$VM_HOME/.cache" 2>/dev/null || true
721
+
722
+ # Fix snap directories ownership
723
+ for snap_dir in "$VM_HOME/snap"/*; do
724
+ [ -d "$snap_dir" ] && chown -R 1000:1000 "$snap_dir" 2>/dev/null || true
725
+ done
726
+
727
+ ok "Directory permissions fixed"
728
+
729
+ section "1/7" "Checking APT packages..."
730
+ write_status "checking_apt" "checking APT packages"
731
+ for pkg in "${{APT_PACKAGES[@]}}"; do
732
+ [ -z "$pkg" ] && continue
733
+ if check_apt "$pkg"; then
734
+ ok "$pkg"
735
+ else
736
+ repair "Installing $pkg..."
737
+ if install_apt "$pkg"; then
738
+ ok "$pkg installed"
739
+ ((REPAIRED++))
740
+ else
741
+ fail "$pkg FAILED"
742
+ fi
743
+ fi
744
+ done
745
+
746
+ section "2/7" "Checking Snap packages..."
747
+ write_status "checking_snaps" "checking snap packages"
748
+ timeout 120 snap wait system seed.loaded 2>/dev/null || true
749
+ for pkg in "${{SNAP_PACKAGES[@]}}"; do
750
+ [ -z "$pkg" ] && continue
751
+ if check_snap "$pkg"; then
752
+ ok "$pkg (snap)"
753
+ else
754
+ repair "Installing $pkg..."
755
+ if install_snap "$pkg"; then
756
+ ok "$pkg installed"
757
+ ((REPAIRED++))
758
+ else
759
+ fail "$pkg FAILED"
760
+ fi
761
+ fi
762
+ done
763
+
764
+ section "3/7" "Connecting Snap interfaces..."
765
+ write_status "connecting_interfaces" "connecting snap interfaces"
766
+ for pkg in "${{SNAP_PACKAGES[@]}}"; do
767
+ [ -z "$pkg" ] && continue
768
+ check_snap "$pkg" && connect_interfaces "$pkg"
769
+ done
770
+ systemctl restart snapd 2>/dev/null || true
771
+
772
+ section "4/7" "Testing application launch..."
773
+ write_status "testing_launch" "testing application launch"
774
+ APPS_TO_TEST=()
775
+ for pkg in "${{SNAP_PACKAGES[@]}}"; do
776
+ [ -z "$pkg" ] && continue
777
+ APPS_TO_TEST+=("$pkg")
778
+ done
779
+ if [ "{str(wants_google_chrome).lower()}" = "true" ]; then
780
+ APPS_TO_TEST+=("google-chrome")
781
+ fi
782
+ if printf '%s\n' "${{APT_PACKAGES[@]}}" | grep -qx "docker.io"; then
783
+ APPS_TO_TEST+=("docker")
784
+ fi
785
+
786
+ for app in "${{APPS_TO_TEST[@]}}"; do
787
+ [ -z "$app" ] && continue
788
+ case "$app" in
789
+ google-chrome)
790
+ if ! command -v google-chrome >/dev/null 2>&1 && ! command -v google-chrome-stable >/dev/null 2>&1; then
791
+ repair "Installing google-chrome..."
792
+ tmp_deb="/tmp/google-chrome-stable_current_amd64.deb"
793
+ if curl -fsSL -o "$tmp_deb" "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb" \
794
+ && DEBIAN_FRONTEND=noninteractive apt-get install -y "$tmp_deb" &>>"$LOG"; then
795
+ rm -f "$tmp_deb"
796
+ ((REPAIRED++))
797
+ else
798
+ rm -f "$tmp_deb" 2>/dev/null || true
799
+ fi
800
+ fi
801
+ ;;
802
+ docker)
803
+ check_apt "docker.io" || continue
804
+ ;;
805
+ *)
806
+ if check_snap "$app"; then
807
+ :
808
+ else
809
+ continue
810
+ fi
811
+ ;;
812
+ esac
813
+
814
+ if test_launch "$app"; then
815
+ ok "$app launches OK"
816
+ else
817
+ fail "$app launch test FAILED"
818
+ # Show error details in main log
819
+ if [ -f "/tmp/$app-error.log" ]; then
820
+ echo " Error details:" | tee -a "$LOG"
821
+ head -10 "/tmp/$app-error.log" | sed 's/^/ /' | tee -a "$LOG" || true
822
+ fi
823
+ fi
824
+ done
825
+
826
+ section "5/7" "Checking mount points..."
827
+ write_status "checking_mounts" "checking mount points"
828
+ while IFS= read -r line; do
829
+ tag=$(echo "$line" | awk '{{print $1}}')
830
+ mp=$(echo "$line" | awk '{{print $2}}')
831
+ if [[ "$tag" =~ ^mount[0-9]+$ ]] && [[ "$mp" == /* ]]; then
832
+ if mountpoint -q "$mp" 2>/dev/null; then
833
+ ok "$mp mounted"
834
+ else
835
+ repair "Mounting $mp..."
836
+ mkdir -p "$mp" 2>/dev/null || true
837
+ if mount "$mp" &>>"$LOG"; then
838
+ ok "$mp mounted"
839
+ ((REPAIRED++))
840
+ else
841
+ fail "$mp mount FAILED"
842
+ fi
843
+ fi
844
+ fi
845
+ done < /etc/fstab
846
+
847
+ section "6/7" "Checking services..."
848
+ write_status "checking_services" "checking services"
849
+ for svc in "${{SERVICES[@]}}"; do
850
+ [ -z "$svc" ] && continue
851
+ if systemctl is-active "$svc" &>/dev/null; then
852
+ ok "$svc running"
853
+ else
854
+ repair "Starting $svc..."
855
+ if systemctl enable --now "$svc" &>/dev/null; then ok "$svc started"; ((REPAIRED++)); else fail "$svc FAILED"; fi
856
+ fi
857
+ done
858
+
859
+ header "Diagnostic Summary"
860
+ log ""
861
+ log " Total: $TOTAL"
862
+ log " ${{GREEN}}Passed:${{NC}} $PASSED"
863
+ log " ${{YELLOW}}Repaired:${{NC}} $REPAIRED"
864
+ log " ${{RED}}Failed:${{NC}} $FAILED"
865
+ log ""
866
+
867
+ write_status "complete" "complete"
868
+
869
+ if [ $FAILED -eq 0 ]; then
870
+ log "${{GREEN}}${{BOLD}}═══════════════════════════════════════════════════════════${{NC}}"
871
+ log "${{GREEN}}${{BOLD}} ✅ All checks passed! CloneBox VM is ready.${{NC}}"
872
+ log "${{GREEN}}${{BOLD}}═══════════════════════════════════════════════════════════${{NC}}"
873
+ exit 0
874
+ else
875
+ log "${{RED}}${{BOLD}}═══════════════════════════════════════════════════════════${{NC}}"
876
+ log "${{RED}}${{BOLD}} ⚠️ $FAILED checks failed. See /var/log/clonebox-boot.log${{NC}}"
877
+ log "${{RED}}${{BOLD}}═══════════════════════════════════════════════════════════${{NC}}"
878
+ exit 1
879
+ fi
880
+ '''
881
+ return base64.b64encode(script.encode()).decode()
882
+
883
+ def _generate_health_check_script(self, config: VMConfig) -> str:
884
+ """Generate a health check script that validates all installed components."""
885
+ import base64
886
+
887
+ # Build package check commands
888
+ apt_checks = []
889
+ for pkg in config.packages:
890
+ apt_checks.append(f'check_apt_package "{pkg}"')
891
+
892
+ snap_checks = []
893
+ for pkg in config.snap_packages:
894
+ snap_checks.append(f'check_snap_package "{pkg}"')
895
+
896
+ service_checks = []
897
+ for svc in config.services:
898
+ service_checks.append(f'check_service "{svc}"')
899
+
900
+ mount_checks = []
901
+ for idx, (host_path, guest_path) in enumerate(config.paths.items()):
902
+ mount_checks.append(f'check_mount "{guest_path}" "mount{idx}"')
903
+
904
+ apt_checks_str = "\n".join(apt_checks) if apt_checks else "echo 'No apt packages to check'"
905
+ snap_checks_str = "\n".join(snap_checks) if snap_checks else "echo 'No snap packages to check'"
906
+ service_checks_str = "\n".join(service_checks) if service_checks else "echo 'No services to check'"
907
+ mount_checks_str = "\n".join(mount_checks) if mount_checks else "echo 'No mounts to check'"
908
+
909
+ script = f'''#!/bin/bash
910
+ # CloneBox Health Check Script
911
+ # Generated automatically - validates all installed components
912
+
913
+ REPORT_FILE="/var/log/clonebox-health.log"
914
+ PASSED=0
915
+ FAILED=0
916
+ WARNINGS=0
917
+
918
+ # Colors for output
919
+ RED='\\033[0;31m'
920
+ GREEN='\\033[0;32m'
921
+ YELLOW='\\033[1;33m'
922
+ NC='\\033[0m'
923
+
924
+ log() {{
925
+ echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$REPORT_FILE"
926
+ }}
927
+
928
+ check_apt_package() {{
929
+ local pkg="$1"
930
+ if dpkg -l "$pkg" 2>/dev/null | grep -q "^ii"; then
931
+ log "[PASS] APT package '$pkg' is installed"
932
+ ((PASSED++))
933
+ return 0
934
+ else
935
+ log "[FAIL] APT package '$pkg' is NOT installed"
936
+ ((FAILED++))
937
+ return 1
938
+ fi
939
+ }}
940
+
941
+ check_snap_package() {{
942
+ local pkg="$1"
943
+ if snap list "$pkg" &>/dev/null; then
944
+ log "[PASS] Snap package '$pkg' is installed"
945
+ ((PASSED++))
946
+ return 0
947
+ else
948
+ log "[FAIL] Snap package '$pkg' is NOT installed"
949
+ ((FAILED++))
950
+ return 1
951
+ fi
952
+ }}
953
+
954
+ check_service() {{
955
+ local svc="$1"
956
+ if systemctl is-enabled "$svc" &>/dev/null; then
957
+ if systemctl is-active "$svc" &>/dev/null; then
958
+ log "[PASS] Service '$svc' is enabled and running"
959
+ ((PASSED++))
960
+ return 0
961
+ else
962
+ log "[WARN] Service '$svc' is enabled but not running"
963
+ ((WARNINGS++))
964
+ return 1
965
+ fi
966
+ else
967
+ log "[INFO] Service '$svc' is not enabled (may be optional)"
968
+ return 0
969
+ fi
970
+ }}
971
+
972
+ check_mount() {{
973
+ local path="$1"
974
+ local tag="$2"
975
+ if mountpoint -q "$path" 2>/dev/null; then
976
+ log "[PASS] Mount '$path' ($tag) is active"
977
+ ((PASSED++))
978
+ return 0
979
+ elif [ -d "$path" ]; then
980
+ log "[WARN] Directory '$path' exists but not mounted"
981
+ ((WARNINGS++))
982
+ return 1
983
+ else
984
+ log "[INFO] Mount point '$path' does not exist yet"
985
+ return 0
986
+ fi
987
+ }}
988
+
989
+ check_gui() {{
990
+ if systemctl get-default | grep -q graphical; then
991
+ log "[PASS] System configured for graphical target"
992
+ ((PASSED++))
993
+ if systemctl is-active gdm3 &>/dev/null || systemctl is-active gdm &>/dev/null; then
994
+ log "[PASS] Display manager (GDM) is running"
995
+ ((PASSED++))
996
+ else
997
+ log "[WARN] Display manager not yet running (may start after reboot)"
998
+ ((WARNINGS++))
999
+ fi
1000
+ else
1001
+ log "[INFO] System not configured for GUI"
1002
+ fi
1003
+ }}
1004
+
1005
+ # Start health check
1006
+ log "=========================================="
1007
+ log "CloneBox Health Check Report"
1008
+ log "VM Name: {config.name}"
1009
+ log "Date: $(date)"
1010
+ log "=========================================="
1011
+
1012
+ log ""
1013
+ log "--- APT Packages ---"
1014
+ {apt_checks_str}
1015
+
1016
+ log ""
1017
+ log "--- Snap Packages ---"
1018
+ {snap_checks_str}
1019
+
1020
+ log ""
1021
+ log "--- Services ---"
1022
+ {service_checks_str}
1023
+
1024
+ log ""
1025
+ log "--- Mounts ---"
1026
+ {mount_checks_str}
1027
+
1028
+ log ""
1029
+ log "--- GUI Status ---"
1030
+ check_gui
1031
+
1032
+ log ""
1033
+ log "=========================================="
1034
+ log "Health Check Summary"
1035
+ log "=========================================="
1036
+ log "Passed: $PASSED"
1037
+ log "Failed: $FAILED"
1038
+ log "Warnings: $WARNINGS"
1039
+
1040
+ if [ $FAILED -eq 0 ]; then
1041
+ log ""
1042
+ log "[SUCCESS] All critical checks passed!"
1043
+ echo "HEALTH_STATUS=OK" > /var/log/clonebox-health-status
1044
+ exit 0
1045
+ else
1046
+ log ""
1047
+ log "[ERROR] Some checks failed. Review log for details."
1048
+ echo "HEALTH_STATUS=FAILED" > /var/log/clonebox-health-status
1049
+ exit 1
1050
+ fi
1051
+ '''
1052
+ # Encode script to base64 for safe embedding in cloud-init
1053
+ encoded = base64.b64encode(script.encode()).decode()
1054
+ return encoded
1055
+
1056
+ def _create_cloudinit_iso(self, vm_dir: Path, config: VMConfig) -> Path:
1057
+ """Create cloud-init ISO with user-data and meta-data."""
1058
+
1059
+ cloudinit_dir = vm_dir / "cloud-init"
1060
+ cloudinit_dir.mkdir(exist_ok=True)
1061
+
1062
+ # Meta-data
1063
+ meta_data = f"instance-id: {config.name}\nlocal-hostname: {config.name}\n"
1064
+ (cloudinit_dir / "meta-data").write_text(meta_data)
1065
+
1066
+ # Generate mount commands and fstab entries for 9p filesystems
1067
+ mount_commands = []
1068
+ fstab_entries = []
1069
+ all_paths = dict(config.paths) if config.paths else {}
1070
+ for idx, (host_path, guest_path) in enumerate(all_paths.items()):
1071
+ if Path(host_path).exists():
1072
+ tag = f"mount{idx}"
1073
+ # Use uid=1000,gid=1000 to give ubuntu user access to mounts
1074
+ # mmap allows proper file mapping
1075
+ mount_opts = "trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000,users"
1076
+ mount_commands.append(f" - mkdir -p {guest_path}")
1077
+ mount_commands.append(f" - chown 1000:1000 {guest_path}")
1078
+ mount_commands.append(
1079
+ f" - mount -t 9p -o {mount_opts} {tag} {guest_path} || true"
1080
+ )
1081
+ # Add fstab entry for persistence after reboot
1082
+ fstab_entries.append(f"{tag} {guest_path} 9p {mount_opts},nofail 0 0")
1083
+
1084
+ # User-data
1085
+ # Add desktop environment if GUI is enabled
1086
+ base_packages = ["qemu-guest-agent", "cloud-guest-utils"]
1087
+ if config.gui:
1088
+ base_packages.extend([
1089
+ "ubuntu-desktop-minimal",
1090
+ "firefox",
1091
+ ])
1092
+
1093
+ all_packages = base_packages + list(config.packages)
1094
+ packages_yaml = (
1095
+ "\n".join(f" - {pkg}" for pkg in all_packages) if all_packages else ""
1096
+ )
1097
+
1098
+ # Build runcmd - services, mounts, snaps, post_commands
1099
+ runcmd_lines = []
1100
+
1101
+ runcmd_lines.append(" - systemctl enable --now qemu-guest-agent || true")
1102
+ runcmd_lines.append(" - systemctl enable --now snapd || true")
1103
+ runcmd_lines.append(" - timeout 300 snap wait system seed.loaded || true")
1104
+
1105
+ # Add service enablement
1106
+ for svc in config.services:
1107
+ runcmd_lines.append(f" - systemctl enable --now {svc} || true")
1108
+
1109
+ # Add fstab entries for persistent mounts after reboot
1110
+ if fstab_entries:
1111
+ runcmd_lines.append(" - grep -q '^# CloneBox 9p mounts' /etc/fstab || echo '# CloneBox 9p mounts' >> /etc/fstab")
1112
+ for entry in fstab_entries:
1113
+ runcmd_lines.append(f" - grep -qF \"{entry}\" /etc/fstab || echo '{entry}' >> /etc/fstab")
1114
+ runcmd_lines.append(" - mount -a || true")
1115
+
1116
+ # Add mounts (immediate, before reboot)
1117
+ for cmd in mount_commands:
1118
+ runcmd_lines.append(cmd)
1119
+
1120
+ # Install snap packages
1121
+ if config.snap_packages:
1122
+ runcmd_lines.append(" - echo 'Installing snap packages...'")
1123
+ for snap_pkg in config.snap_packages:
1124
+ runcmd_lines.append(f" - snap install {snap_pkg} --classic || snap install {snap_pkg} || true")
1125
+
1126
+ # Connect snap interfaces for GUI apps (not auto-connected via cloud-init)
1127
+ runcmd_lines.append(" - echo 'Connecting snap interfaces...'")
1128
+ for snap_pkg in config.snap_packages:
1129
+ interfaces = SNAP_INTERFACES.get(snap_pkg, DEFAULT_SNAP_INTERFACES)
1130
+ for iface in interfaces:
1131
+ runcmd_lines.append(f" - snap connect {snap_pkg}:{iface} :{iface} 2>/dev/null || true")
1132
+
1133
+ runcmd_lines.append(" - systemctl restart snapd || true")
1134
+
1135
+ # Add GUI setup if enabled - runs AFTER package installation completes
1136
+ if config.gui:
1137
+ # Create directories that GNOME services need BEFORE GUI starts
1138
+ # These may conflict with mounted host directories, so ensure they exist with correct perms
1139
+ runcmd_lines.extend([
1140
+ " - mkdir -p /home/ubuntu/.config/pulse /home/ubuntu/.cache/ibus /home/ubuntu/.local/share",
1141
+ " - mkdir -p /home/ubuntu/.config/dconf /home/ubuntu/.cache/tracker3",
1142
+ " - mkdir -p /home/ubuntu/.config/autostart",
1143
+ " - chown -R 1000:1000 /home/ubuntu/.config /home/ubuntu/.cache /home/ubuntu/.local",
1144
+ " - chmod 700 /home/ubuntu/.config /home/ubuntu/.cache",
1145
+ " - systemctl set-default graphical.target",
1146
+ " - systemctl enable gdm3 || systemctl enable gdm || true",
1147
+ ])
1148
+
1149
+ # Create autostart entries for GUI apps
1150
+ autostart_apps = {
1151
+ 'pycharm-community': ('PyCharm Community', '/snap/bin/pycharm-community', 'pycharm-community'),
1152
+ 'firefox': ('Firefox', '/snap/bin/firefox', 'firefox'),
1153
+ 'chromium': ('Chromium', '/snap/bin/chromium', 'chromium'),
1154
+ 'google-chrome': ('Google Chrome', 'google-chrome-stable', 'google-chrome'),
1155
+ }
1156
+
1157
+ for snap_pkg in config.snap_packages:
1158
+ if snap_pkg in autostart_apps:
1159
+ name, exec_cmd, icon = autostart_apps[snap_pkg]
1160
+ desktop_entry = f'''[Desktop Entry]
1161
+ Type=Application
1162
+ Name={name}
1163
+ Exec={exec_cmd}
1164
+ Icon={icon}
1165
+ X-GNOME-Autostart-enabled=true
1166
+ X-GNOME-Autostart-Delay=5
1167
+ Comment=CloneBox autostart
1168
+ '''
1169
+ import base64
1170
+ desktop_b64 = base64.b64encode(desktop_entry.encode()).decode()
1171
+ runcmd_lines.append(f" - echo '{desktop_b64}' | base64 -d > /home/ubuntu/.config/autostart/{snap_pkg}.desktop")
1172
+
1173
+ # Check if google-chrome is in paths (app_data_paths)
1174
+ wants_chrome = any('/google-chrome' in str(p) for p in (config.paths or {}).values())
1175
+ if wants_chrome:
1176
+ name, exec_cmd, icon = autostart_apps['google-chrome']
1177
+ desktop_entry = f'''[Desktop Entry]
1178
+ Type=Application
1179
+ Name={name}
1180
+ Exec={exec_cmd}
1181
+ Icon={icon}
1182
+ X-GNOME-Autostart-enabled=true
1183
+ X-GNOME-Autostart-Delay=5
1184
+ Comment=CloneBox autostart
1185
+ '''
1186
+ desktop_b64 = base64.b64encode(desktop_entry.encode()).decode()
1187
+ runcmd_lines.append(f" - echo '{desktop_b64}' | base64 -d > /home/ubuntu/.config/autostart/google-chrome.desktop")
1188
+
1189
+ # Fix ownership of autostart directory
1190
+ runcmd_lines.append(" - chown -R 1000:1000 /home/ubuntu/.config/autostart")
1191
+
1192
+ # Run user-defined post commands
1193
+ if config.post_commands:
1194
+ runcmd_lines.append(" - echo 'Running post-setup commands...'")
1195
+ for cmd in config.post_commands:
1196
+ runcmd_lines.append(f" - {cmd}")
1197
+
1198
+ # Generate health check script
1199
+ health_script = self._generate_health_check_script(config)
1200
+ runcmd_lines.append(f" - echo '{health_script}' | base64 -d > /usr/local/bin/clonebox-health")
1201
+ runcmd_lines.append(" - chmod +x /usr/local/bin/clonebox-health")
1202
+ runcmd_lines.append(" - /usr/local/bin/clonebox-health >> /var/log/clonebox-health.log 2>&1")
1203
+ runcmd_lines.append(" - echo 'CloneBox VM ready!' > /var/log/clonebox-ready")
1204
+
1205
+ # Generate boot diagnostic script (self-healing)
1206
+ boot_diag_script = self._generate_boot_diagnostic_script(config)
1207
+ runcmd_lines.append(f" - echo '{boot_diag_script}' | base64 -d > /usr/local/bin/clonebox-boot-diagnostic")
1208
+ runcmd_lines.append(" - chmod +x /usr/local/bin/clonebox-boot-diagnostic")
1209
+
1210
+ # Create systemd service for boot diagnostic (runs before GDM on subsequent boots)
1211
+ systemd_service = '''[Unit]
1212
+ Description=CloneBox Boot Diagnostic
1213
+ After=network-online.target snapd.service
1214
+ Before=gdm.service display-manager.service
1215
+ Wants=network-online.target
1216
+
1217
+ [Service]
1218
+ Type=oneshot
1219
+ ExecStart=/usr/local/bin/clonebox-boot-diagnostic
1220
+ StandardOutput=journal+console
1221
+ StandardError=journal+console
1222
+ TTYPath=/dev/tty1
1223
+ TTYReset=yes
1224
+ TTYVHangup=yes
1225
+ RemainAfterExit=yes
1226
+ TimeoutStartSec=600
1227
+
1228
+ [Install]
1229
+ WantedBy=multi-user.target'''
1230
+ import base64
1231
+ systemd_b64 = base64.b64encode(systemd_service.encode()).decode()
1232
+ runcmd_lines.append(f" - echo '{systemd_b64}' | base64 -d > /etc/systemd/system/clonebox-diagnostic.service")
1233
+ runcmd_lines.append(" - systemctl daemon-reload")
1234
+ runcmd_lines.append(" - systemctl enable clonebox-diagnostic.service")
1235
+ runcmd_lines.append(" - systemctl start clonebox-diagnostic.service || true")
1236
+
1237
+ # Create MOTD banner
1238
+ motd_banner = '''#!/bin/bash
1239
+ S="/var/run/clonebox-status"
1240
+ echo ""
1241
+ echo -e "\\033[1;34m═══════════════════════════════════════════════════════════\\033[0m"
1242
+ echo -e "\\033[1;34m CloneBox VM Status\\033[0m"
1243
+ echo -e "\\033[1;34m═══════════════════════════════════════════════════════════\\033[0m"
1244
+ if [ -f "$S" ]; then
1245
+ source "$S"
1246
+ if [ "${failed:-0}" -eq 0 ]; then
1247
+ echo -e " \\033[0;32m✅ All systems operational\\033[0m"
1248
+ else
1249
+ echo -e " \\033[0;31m⚠️ $failed checks failed\\033[0m"
1250
+ fi
1251
+ echo -e " Passed: ${passed:-0} | Repaired: ${repaired:-0} | Failed: ${failed:-0}"
1252
+ fi
1253
+ echo -e " Log: /var/log/clonebox-boot.log"
1254
+ echo -e "\\033[1;34m═══════════════════════════════════════════════════════════\\033[0m"
1255
+ echo ""'''
1256
+ motd_b64 = base64.b64encode(motd_banner.encode()).decode()
1257
+ runcmd_lines.append(f" - echo '{motd_b64}' | base64 -d > /etc/update-motd.d/99-clonebox")
1258
+ runcmd_lines.append(" - chmod +x /etc/update-motd.d/99-clonebox")
1259
+
1260
+ # Create user-friendly clonebox-repair script
1261
+ repair_script = r'''#!/bin/bash
1262
+ # CloneBox Repair - User-friendly repair utility for CloneBox VMs
1263
+ # Usage: clonebox-repair [--auto|--status|--logs|--help]
1264
+
1265
+ set -uo pipefail
1266
+
1267
+ RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' CYAN='\033[0;36m' NC='\033[0m' BOLD='\033[1m'
1268
+
1269
+ show_help() {
1270
+ echo -e "${BOLD}${CYAN}CloneBox Repair Utility${NC}"
1271
+ echo ""
1272
+ echo "Usage: clonebox-repair [OPTION]"
1273
+ echo ""
1274
+ echo "Options:"
1275
+ echo " --auto Run full automatic repair (same as boot diagnostic)"
1276
+ echo " --status Show current CloneBox status"
1277
+ echo " --logs Show recent repair logs"
1278
+ echo " --perms Fix directory permissions only"
1279
+ echo " --audio Fix audio (PulseAudio) and restart"
1280
+ echo " --keyring Reset GNOME Keyring (fixes password mismatch)"
1281
+ echo " --snaps Reconnect all snap interfaces only"
1282
+ echo " --mounts Remount all 9p filesystems only"
1283
+ echo " --all Run all fixes (perms + audio + snaps + mounts)"
1284
+ echo " --help Show this help message"
1285
+ echo ""
1286
+ echo "Without options, shows interactive menu."
1287
+ }
1288
+
1289
+ show_status() {
1290
+ echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
1291
+ echo -e "${BOLD}${CYAN} CloneBox VM Status${NC}"
1292
+ echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
1293
+
1294
+ if [ -f /var/run/clonebox-status ]; then
1295
+ source /var/run/clonebox-status
1296
+ if [ "${failed:-0}" -eq 0 ]; then
1297
+ echo -e " ${GREEN}✅ All systems operational${NC}"
1298
+ else
1299
+ echo -e " ${RED}⚠️ $failed checks failed${NC}"
1300
+ fi
1301
+ echo -e " Passed: ${passed:-0} | Repaired: ${repaired:-0} | Failed: ${failed:-0}"
1302
+ else
1303
+ echo -e " ${YELLOW}No status information available${NC}"
1304
+ fi
1305
+ echo ""
1306
+ echo -e " Last boot diagnostic: $(stat -c %y /var/log/clonebox-boot.log 2>/dev/null || echo 'never')"
1307
+ echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
1308
+ }
1309
+
1310
+ show_logs() {
1311
+ echo -e "${BOLD}Recent repair logs:${NC}"
1312
+ echo ""
1313
+ tail -n 50 /var/log/clonebox-boot.log 2>/dev/null || echo "No logs found"
1314
+ }
1315
+
1316
+ fix_permissions() {
1317
+ echo -e "${CYAN}Fixing directory permissions...${NC}"
1318
+ VM_USER="${SUDO_USER:-ubuntu}"
1319
+ VM_HOME="/home/$VM_USER"
1320
+
1321
+ DIRS_TO_CREATE=(
1322
+ "$VM_HOME/.config"
1323
+ "$VM_HOME/.config/pulse"
1324
+ "$VM_HOME/.config/dconf"
1325
+ "$VM_HOME/.config/ibus"
1326
+ "$VM_HOME/.cache"
1327
+ "$VM_HOME/.cache/ibus"
1328
+ "$VM_HOME/.cache/tracker3"
1329
+ "$VM_HOME/.cache/mesa_shader_cache"
1330
+ "$VM_HOME/.local"
1331
+ "$VM_HOME/.local/share"
1332
+ "$VM_HOME/.local/share/applications"
1333
+ "$VM_HOME/.local/share/keyrings"
1334
+ )
1335
+
1336
+ for dir in "${DIRS_TO_CREATE[@]}"; do
1337
+ if [ ! -d "$dir" ]; then
1338
+ mkdir -p "$dir" 2>/dev/null && echo " Created $dir"
1339
+ fi
1340
+ done
1341
+
1342
+ chown -R 1000:1000 "$VM_HOME/.config" "$VM_HOME/.cache" "$VM_HOME/.local" 2>/dev/null
1343
+ chmod 700 "$VM_HOME/.config" "$VM_HOME/.cache" 2>/dev/null
1344
+
1345
+ for snap_dir in "$VM_HOME/snap"/*; do
1346
+ [ -d "$snap_dir" ] && chown -R 1000:1000 "$snap_dir" 2>/dev/null
1347
+ done
1348
+
1349
+ echo -e "${GREEN}✅ Permissions fixed${NC}"
1350
+ }
1351
+
1352
+ fix_audio() {
1353
+ echo -e "${CYAN}Fixing audio (PulseAudio/PipeWire)...${NC}"
1354
+ VM_USER="${SUDO_USER:-ubuntu}"
1355
+ VM_HOME="/home/$VM_USER"
1356
+
1357
+ # Create pulse config directory with correct permissions
1358
+ mkdir -p "$VM_HOME/.config/pulse" 2>/dev/null
1359
+ chown -R 1000:1000 "$VM_HOME/.config/pulse" 2>/dev/null
1360
+ chmod 700 "$VM_HOME/.config/pulse" 2>/dev/null
1361
+
1362
+ # Kill and restart audio services as user
1363
+ if [ -n "$SUDO_USER" ]; then
1364
+ sudo -u "$SUDO_USER" pulseaudio --kill 2>/dev/null || true
1365
+ sleep 1
1366
+ sudo -u "$SUDO_USER" pulseaudio --start 2>/dev/null || true
1367
+ echo " Restarted PulseAudio for $SUDO_USER"
1368
+ else
1369
+ pulseaudio --kill 2>/dev/null || true
1370
+ sleep 1
1371
+ pulseaudio --start 2>/dev/null || true
1372
+ echo " Restarted PulseAudio"
1373
+ fi
1374
+
1375
+ # Restart pipewire if available
1376
+ systemctl --user restart pipewire pipewire-pulse 2>/dev/null || true
1377
+
1378
+ echo -e "${GREEN}✅ Audio fixed${NC}"
1379
+ }
1380
+
1381
+ fix_keyring() {
1382
+ echo -e "${CYAN}Resetting GNOME Keyring...${NC}"
1383
+ VM_USER="${SUDO_USER:-ubuntu}"
1384
+ VM_HOME="/home/$VM_USER"
1385
+ KEYRING_DIR="$VM_HOME/.local/share/keyrings"
1386
+
1387
+ echo -e "${YELLOW}⚠️ This will delete existing keyrings and create a new one on next login${NC}"
1388
+ echo -e "${YELLOW} Stored passwords (WiFi, Chrome, etc.) will be lost!${NC}"
1389
+
1390
+ if [ -t 0 ]; then
1391
+ read -rp "Continue? [y/N] " confirm
1392
+ [[ "$confirm" != [yY]* ]] && { echo "Cancelled"; return; }
1393
+ fi
1394
+
1395
+ # Backup old keyrings
1396
+ if [ -d "$KEYRING_DIR" ] && [ "$(ls -A "$KEYRING_DIR" 2>/dev/null)" ]; then
1397
+ backup_dir="$VM_HOME/.local/share/keyrings.backup.$(date +%Y%m%d%H%M%S)"
1398
+ mv "$KEYRING_DIR" "$backup_dir" 2>/dev/null
1399
+ echo " Backed up to $backup_dir"
1400
+ fi
1401
+
1402
+ # Create fresh keyring directory
1403
+ mkdir -p "$KEYRING_DIR" 2>/dev/null
1404
+ chown -R 1000:1000 "$KEYRING_DIR" 2>/dev/null
1405
+ chmod 700 "$KEYRING_DIR" 2>/dev/null
1406
+
1407
+ # Kill gnome-keyring-daemon to force restart on next login
1408
+ pkill -u "$VM_USER" gnome-keyring-daemon 2>/dev/null || true
1409
+
1410
+ echo -e "${GREEN}✅ Keyring reset - log out and back in to create new keyring${NC}"
1411
+ }
1412
+
1413
+ fix_ibus() {
1414
+ echo -e "${CYAN}Fixing IBus input method...${NC}"
1415
+ VM_USER="${SUDO_USER:-ubuntu}"
1416
+ VM_HOME="/home/$VM_USER"
1417
+
1418
+ # Create ibus cache directory
1419
+ mkdir -p "$VM_HOME/.cache/ibus" 2>/dev/null
1420
+ chown -R 1000:1000 "$VM_HOME/.cache/ibus" 2>/dev/null
1421
+ chmod 700 "$VM_HOME/.cache/ibus" 2>/dev/null
1422
+
1423
+ # Restart ibus
1424
+ if [ -n "$SUDO_USER" ]; then
1425
+ sudo -u "$SUDO_USER" ibus restart 2>/dev/null || true
1426
+ else
1427
+ ibus restart 2>/dev/null || true
1428
+ fi
1429
+
1430
+ echo -e "${GREEN}✅ IBus fixed${NC}"
1431
+ }
1432
+
1433
+ fix_snaps() {
1434
+ echo -e "${CYAN}Reconnecting snap interfaces...${NC}"
1435
+ IFACES="desktop desktop-legacy x11 wayland home network audio-playback audio-record camera opengl"
1436
+
1437
+ for snap in $(snap list --color=never 2>/dev/null | tail -n +2 | awk '{print $1}'); do
1438
+ [[ "$snap" =~ ^(core|snapd|gnome-|gtk-|mesa-) ]] && continue
1439
+ echo -e " ${YELLOW}$snap${NC}"
1440
+ for iface in $IFACES; do
1441
+ snap connect "$snap:$iface" ":$iface" 2>/dev/null && echo " ✓ $iface" || true
1442
+ done
1443
+ done
1444
+
1445
+ systemctl restart snapd 2>/dev/null || true
1446
+ echo -e "${GREEN}✅ Snap interfaces reconnected${NC}"
1447
+ }
1448
+
1449
+ fix_mounts() {
1450
+ echo -e "${CYAN}Remounting filesystems...${NC}"
1451
+
1452
+ while IFS= read -r line; do
1453
+ tag=$(echo "$line" | awk '{print $1}')
1454
+ mp=$(echo "$line" | awk '{print $2}')
1455
+ if [[ "$tag" =~ ^mount[0-9]+$ ]] && [[ "$mp" == /* ]]; then
1456
+ if ! mountpoint -q "$mp" 2>/dev/null; then
1457
+ mkdir -p "$mp" 2>/dev/null
1458
+ if mount "$mp" 2>/dev/null; then
1459
+ echo -e " ${GREEN}✓${NC} $mp"
1460
+ else
1461
+ echo -e " ${RED}✗${NC} $mp (failed)"
1462
+ fi
1463
+ else
1464
+ echo -e " ${GREEN}✓${NC} $mp (already mounted)"
1465
+ fi
1466
+ fi
1467
+ done < /etc/fstab
1468
+
1469
+ echo -e "${GREEN}✅ Mounts checked${NC}"
1470
+ }
1471
+
1472
+ fix_all() {
1473
+ echo -e "${BOLD}${CYAN}Running all fixes...${NC}"
1474
+ echo ""
1475
+ fix_permissions
1476
+ echo ""
1477
+ fix_audio
1478
+ echo ""
1479
+ fix_ibus
1480
+ echo ""
1481
+ fix_snaps
1482
+ echo ""
1483
+ fix_mounts
1484
+ echo ""
1485
+ echo -e "${BOLD}${GREEN}All fixes completed!${NC}"
1486
+ }
1487
+
1488
+ interactive_menu() {
1489
+ while true; do
1490
+ echo ""
1491
+ echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
1492
+ echo -e "${BOLD}${CYAN} CloneBox Repair Menu${NC}"
1493
+ echo -e "${BOLD}${CYAN}═══════════════════════════════════════════════════════════${NC}"
1494
+ echo ""
1495
+ echo " 1) Run full automatic repair (boot diagnostic)"
1496
+ echo " 2) Run all quick fixes (perms + audio + snaps + mounts)"
1497
+ echo " 3) Fix permissions only"
1498
+ echo " 4) Fix audio (PulseAudio) only"
1499
+ echo " 5) Reset GNOME Keyring (⚠️ deletes saved passwords)"
1500
+ echo " 6) Reconnect snap interfaces only"
1501
+ echo " 7) Remount filesystems only"
1502
+ echo " 8) Show status"
1503
+ echo " 9) Show logs"
1504
+ echo " q) Quit"
1505
+ echo ""
1506
+ read -rp "Select option: " choice
1507
+
1508
+ case "$choice" in
1509
+ 1) sudo /usr/local/bin/clonebox-boot-diagnostic ;;
1510
+ 2) fix_all ;;
1511
+ 3) fix_permissions ;;
1512
+ 4) fix_audio ;;
1513
+ 5) fix_keyring ;;
1514
+ 6) fix_snaps ;;
1515
+ 7) fix_mounts ;;
1516
+ 8) show_status ;;
1517
+ 9) show_logs ;;
1518
+ q|Q) exit 0 ;;
1519
+ *) echo -e "${RED}Invalid option${NC}" ;;
1520
+ esac
1521
+ done
1522
+ }
1523
+
1524
+ # Main
1525
+ case "${1:-}" in
1526
+ --auto) exec sudo /usr/local/bin/clonebox-boot-diagnostic ;;
1527
+ --all) fix_all ;;
1528
+ --status) show_status ;;
1529
+ --logs) show_logs ;;
1530
+ --perms) fix_permissions ;;
1531
+ --audio) fix_audio ;;
1532
+ --keyring) fix_keyring ;;
1533
+ --snaps) fix_snaps ;;
1534
+ --mounts) fix_mounts ;;
1535
+ --help|-h) show_help ;;
1536
+ "") interactive_menu ;;
1537
+ *) show_help; exit 1 ;;
1538
+ esac
1539
+ '''
1540
+ repair_b64 = base64.b64encode(repair_script.encode()).decode()
1541
+ runcmd_lines.append(f" - echo '{repair_b64}' | base64 -d > /usr/local/bin/clonebox-repair")
1542
+ runcmd_lines.append(" - chmod +x /usr/local/bin/clonebox-repair")
1543
+ runcmd_lines.append(" - ln -sf /usr/local/bin/clonebox-repair /usr/local/bin/cb-repair")
1544
+
1545
+ # === AUTOSTART: Systemd user services + Desktop autostart files ===
1546
+ # Create directories for user systemd services and autostart
1547
+ runcmd_lines.append(f" - mkdir -p /home/{config.username}/.config/systemd/user")
1548
+ runcmd_lines.append(f" - mkdir -p /home/{config.username}/.config/autostart")
1549
+
1550
+ # Enable lingering for the user (allows user services to run without login)
1551
+ runcmd_lines.append(f" - loginctl enable-linger {config.username}")
1552
+
1553
+ # Add environment variables for monitoring
1554
+ runcmd_lines.extend([
1555
+ " - echo 'CLONEBOX_ENABLE_MONITORING=true' >> /etc/environment",
1556
+ " - echo 'CLONEBOX_MONITOR_INTERVAL=30' >> /etc/environment",
1557
+ " - echo 'CLONEBOX_AUTO_REPAIR=true' >> /etc/environment",
1558
+ " - echo 'CLONEBOX_WATCH_APPS=true' >> /etc/environment",
1559
+ " - echo 'CLONEBOX_WATCH_SERVICES=true' >> /etc/environment",
1560
+ ])
1561
+
1562
+ # Generate autostart configurations based on installed apps (if enabled)
1563
+ autostart_apps = []
1564
+
1565
+ if getattr(config, 'autostart_apps', True):
1566
+ # Detect apps from snap_packages
1567
+ for snap_pkg in (config.snap_packages or []):
1568
+ if snap_pkg == "pycharm-community":
1569
+ autostart_apps.append({
1570
+ "name": "pycharm-community",
1571
+ "display_name": "PyCharm Community",
1572
+ "exec": "/snap/bin/pycharm-community %U",
1573
+ "type": "snap",
1574
+ "after": "graphical-session.target",
1575
+ })
1576
+ elif snap_pkg == "chromium":
1577
+ autostart_apps.append({
1578
+ "name": "chromium",
1579
+ "display_name": "Chromium Browser",
1580
+ "exec": "/snap/bin/chromium %U",
1581
+ "type": "snap",
1582
+ "after": "graphical-session.target",
1583
+ })
1584
+ elif snap_pkg == "firefox":
1585
+ autostart_apps.append({
1586
+ "name": "firefox",
1587
+ "display_name": "Firefox",
1588
+ "exec": "/snap/bin/firefox %U",
1589
+ "type": "snap",
1590
+ "after": "graphical-session.target",
1591
+ })
1592
+ elif snap_pkg == "code":
1593
+ autostart_apps.append({
1594
+ "name": "code",
1595
+ "display_name": "Visual Studio Code",
1596
+ "exec": "/snap/bin/code --new-window",
1597
+ "type": "snap",
1598
+ "after": "graphical-session.target",
1599
+ })
1600
+
1601
+ # Detect apps from packages (APT)
1602
+ for apt_pkg in (config.packages or []):
1603
+ if apt_pkg == "firefox":
1604
+ # Only add if not already added from snap
1605
+ if not any(a["name"] == "firefox" for a in autostart_apps):
1606
+ autostart_apps.append({
1607
+ "name": "firefox",
1608
+ "display_name": "Firefox",
1609
+ "exec": "/usr/bin/firefox %U",
1610
+ "type": "apt",
1611
+ "after": "graphical-session.target",
1612
+ })
1613
+
1614
+ # Check config.paths for a mapped Google Chrome profile (implies the .deb launcher should autostart)
1615
+ for host_path, guest_path in (config.paths or {}).items():
1616
+ if guest_path == "/home/ubuntu/.config/google-chrome":
1617
+ autostart_apps.append({
1618
+ "name": "google-chrome",
1619
+ "display_name": "Google Chrome",
1620
+ "exec": "/usr/bin/google-chrome-stable %U",
1621
+ "type": "deb",
1622
+ "after": "graphical-session.target",
1623
+ })
1624
+ break
1625
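For orientation, a sketch of a config that exercises all three detection branches above (values are illustrative): snap packages map to /snap/bin launchers, the APT firefox entry is added only when no snap entry already exists, and a mapped Google Chrome profile path implies the .deb launcher.

    cfg = VMConfig(
        snap_packages=["pycharm-community", "chromium"],
        packages=["firefox"],
        paths={"/home/me/.config/google-chrome": "/home/ubuntu/.config/google-chrome"},
    )
    # -> autostart_apps: pycharm-community and chromium (snap), firefox (apt), google-chrome (deb)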
+
1626
+ # Generate systemd user services for each app
1627
+ for app in autostart_apps:
1628
+ service_content = f'''[Unit]
1629
+ Description={app["display_name"]} Autostart
1630
+ After={app["after"]}
1631
+
1632
+ [Service]
1633
+ Type=simple
1634
+ Environment=DISPLAY=:0
1635
+ Environment=XDG_RUNTIME_DIR=/run/user/1000
1636
+ ExecStart={app["exec"]}
1637
+ Restart=on-failure
1638
+ RestartSec=5
1639
+
1640
+ [Install]
1641
+ WantedBy=default.target
1642
+ '''
1643
+ service_b64 = base64.b64encode(service_content.encode()).decode()
1644
+ service_path = f"/home/{config.username}/.config/systemd/user/{app['name']}.service"
1645
+ runcmd_lines.append(f" - echo '{service_b64}' | base64 -d > {service_path}")
1646
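As a concrete illustration, for a config whose snap_packages include code, the template above renders to roughly the following user unit at ~/.config/systemd/user/code.service:

    [Unit]
    Description=Visual Studio Code Autostart
    After=graphical-session.target

    [Service]
    Type=simple
    Environment=DISPLAY=:0
    Environment=XDG_RUNTIME_DIR=/run/user/1000
    ExecStart=/snap/bin/code --new-window
    Restart=on-failure
    RestartSec=5

    [Install]
    WantedBy=default.target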
+
1647
+ # Generate desktop autostart files for GUI apps (alternative to systemd user services)
1648
+ for app in autostart_apps:
1649
+ desktop_content = f'''[Desktop Entry]
1650
+ Type=Application
1651
+ Name={app["display_name"]}
1652
+ Exec={app["exec"]}
1653
+ Hidden=false
1654
+ NoDisplay=false
1655
+ X-GNOME-Autostart-enabled=true
1656
+ X-GNOME-Autostart-Delay=5
1657
+ '''
1658
+ desktop_b64 = base64.b64encode(desktop_content.encode()).decode()
1659
+ desktop_path = f"/home/{config.username}/.config/autostart/{app['name']}.desktop"
1660
+ runcmd_lines.append(f" - echo '{desktop_b64}' | base64 -d > {desktop_path}")
1661
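The desktop autostart variant of the same app renders to a small .desktop entry; for snap firefox it would look like this, written to ~/.config/autostart/firefox.desktop:

    [Desktop Entry]
    Type=Application
    Name=Firefox
    Exec=/snap/bin/firefox %U
    Hidden=false
    NoDisplay=false
    X-GNOME-Autostart-enabled=true
    X-GNOME-Autostart-Delay=5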
+
1662
+ # Fix ownership of all autostart files
1663
+ runcmd_lines.append(f" - chown -R 1000:1000 /home/{config.username}/.config/systemd")
1664
+ runcmd_lines.append(f" - chown -R 1000:1000 /home/{config.username}/.config/autostart")
1665
+
1666
+ # Enable systemd user services (must run as user)
1667
+ if autostart_apps:
1668
+ services_to_enable = " ".join(f"{app['name']}.service" for app in autostart_apps)
1669
+ runcmd_lines.append(f" - sudo -u {config.username} XDG_RUNTIME_DIR=/run/user/1000 systemctl --user daemon-reload || true")
1670
+ # Note: the services are not enabled by default because desktop autostart is more reliable for GUI apps.
1671
+ # Users can enable one manually with: systemctl --user enable <name>.service
1672
+
1673
+ # === WEB SERVICES: System-wide services for uvicorn, nginx, etc. ===
1674
+ web_services = getattr(config, 'web_services', []) or []
1675
+ for svc in web_services:
1676
+ svc_name = svc.get("name", "clonebox-web")
1677
+ svc_desc = svc.get("description", f"CloneBox {svc_name}")
1678
+ svc_workdir = svc.get("workdir", "/mnt/project0")
1679
+ svc_exec = svc.get("exec", "uvicorn app:app --host 0.0.0.0 --port 8000")
1680
+ svc_user = svc.get("user", config.username)
1681
+ svc_after = svc.get("after", "network.target")
1682
+ svc_env = svc.get("environment", [])
1683
+
1684
+ env_lines = "\n".join(f"Environment={e}" for e in svc_env) if svc_env else ""
1685
+
1686
+ web_service_content = f'''[Unit]
1687
+ Description={svc_desc}
1688
+ After={svc_after}
1689
+
1690
+ [Service]
1691
+ Type=simple
1692
+ User={svc_user}
1693
+ WorkingDirectory={svc_workdir}
1694
+ {env_lines}
1695
+ ExecStart={svc_exec}
1696
+ Restart=always
1697
+ RestartSec=10
1698
+
1699
+ [Install]
1700
+ WantedBy=multi-user.target
1701
+ '''
1702
+ web_svc_b64 = base64.b64encode(web_service_content.encode()).decode()
1703
+ runcmd_lines.append(f" - echo '{web_svc_b64}' | base64 -d > /etc/systemd/system/{svc_name}.service")
1704
+ runcmd_lines.append(" - systemctl daemon-reload")
1705
+ runcmd_lines.append(f" - systemctl enable {svc_name}.service")
1706
+ runcmd_lines.append(f" - systemctl start {svc_name}.service || true")
1707
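For reference, one entry in config.web_services using the keys consumed above might look like this (values illustrative; any key that is omitted falls back to the default shown in the corresponding .get() call):

    config.web_services = [{
        "name": "myapi",              # installed as /etc/systemd/system/myapi.service
        "description": "My API backend",
        "workdir": "/mnt/project0",
        "exec": "uvicorn app:app --host 0.0.0.0 --port 8000",
        "user": "ubuntu",
        "after": "network.target",
        "environment": ["PORT=8000", "ENV=production"],
    }]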
+
1708
+ # Install CloneBox Monitor for continuous monitoring and self-healing
1709
+ scripts_dir = Path(__file__).resolve().parent.parent.parent / "scripts"
1710
+ try:
1711
+ with open(scripts_dir / "clonebox-monitor.sh") as f:
1712
+ monitor_script = f.read()
1713
+ with open(scripts_dir / "clonebox-monitor.service") as f:
1714
+ monitor_service = f.read()
1715
+ with open(scripts_dir / "clonebox-monitor.default") as f:
1716
+ monitor_config = f.read()
1717
+ except (FileNotFoundError, OSError):
1718
+ # Fallback to embedded scripts if files not found
1719
+ monitor_script = '''#!/bin/bash
1720
+ # CloneBox Monitor - Fallback embedded version
1721
+ set -euo pipefail
1722
+ LOG_FILE="/var/log/clonebox-monitor.log"
1723
+ log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"; }
1724
+ log_info() { log "[INFO] $1"; }
1725
+ log_warn() { log "[WARN] $1"; }
1726
+ log_error() { log "[ERROR] $1"; }
1727
+ log_success() { log "[SUCCESS] $1"; }
1728
+ while true; do
1729
+ log_info "CloneBox Monitor running..."
1730
+ sleep 60
1731
+ done
1732
+ '''
1733
+ monitor_service = '''[Unit]
1734
+ Description=CloneBox Monitor
1735
+ After=graphical-session.target
1736
+ [Service]
1737
+ Type=simple
1738
+ User=ubuntu
1739
+ ExecStart=/usr/local/bin/clonebox-monitor
1740
+ Restart=always
1741
+ [Install]
1742
+ WantedBy=default.target
1743
+ '''
1744
+ monitor_config = '''# CloneBox Monitor Configuration
1745
+ CLONEBOX_MONITOR_INTERVAL=30
1746
+ CLONEBOX_AUTO_REPAIR=true
1747
+ '''
1748
+
1749
+ # Install monitor script
1750
+ monitor_b64 = base64.b64encode(monitor_script.encode()).decode()
1751
+ runcmd_lines.append(f" - echo '{monitor_b64}' | base64 -d > /usr/local/bin/clonebox-monitor")
1752
+ runcmd_lines.append(" - chmod +x /usr/local/bin/clonebox-monitor")
1753
+
1754
+ # Install monitor configuration
1755
+ config_b64 = base64.b64encode(monitor_config.encode()).decode()
1756
+ runcmd_lines.append(f" - echo '{config_b64}' | base64 -d > /etc/default/clonebox-monitor")
1757
+
1758
+ # Install systemd user service
1759
+ service_b64 = base64.b64encode(monitor_service.encode()).decode()
1760
+ runcmd_lines.append(f" - echo '{service_b64}' | base64 -d > /etc/systemd/user/clonebox-monitor.service")
1761
+
1762
+ # Enable lingering and start monitor
1763
+ runcmd_lines.extend([
1764
+ " - loginctl enable-linger ubuntu",
1765
+ " - sudo -u ubuntu systemctl --user daemon-reload",
1766
+ " - sudo -u ubuntu systemctl --user enable clonebox-monitor.service",
1767
+ " - sudo -u ubuntu systemctl --user start clonebox-monitor.service || true",
1768
+ ])
1769
+
1770
+ # Create Python monitor service for continuous diagnostics (legacy)
1771
+ monitor_script = f'''#!/usr/bin/env python3
1772
+ """CloneBox Monitor - Continuous diagnostics and app restart service."""
1773
+ import subprocess
1774
+ import time
1775
+ import os
1776
+ import sys
1777
+ import json
1778
+ from pathlib import Path
1779
+
1780
+ REQUIRED_APPS = {json.dumps([app["name"] for app in autostart_apps])}
1781
+ CHECK_INTERVAL = 60 # seconds
1782
+ LOG_FILE = "/var/log/clonebox-monitor.log"
1783
+ STATUS_FILE = "/var/run/clonebox-monitor-status.json"
1784
+
1785
+ def log(msg):
1786
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
1787
+ line = f"[{{timestamp}}] {{msg}}"
1788
+ print(line)
1789
+ try:
1790
+ with open(LOG_FILE, "a") as f:
1791
+ f.write(line + "\\n")
1792
+ except:
1793
+ pass
1794
+
1795
+ def get_running_processes():
1796
+ try:
1797
+ result = subprocess.run(["ps", "aux"], capture_output=True, text=True, timeout=10)
1798
+ return result.stdout
1799
+ except:
1800
+ return ""
1801
+
1802
+ def is_app_running(app_name, ps_output):
1803
+ patterns = {{
1804
+ "pycharm-community": ["pycharm", "idea"],
1805
+ "chromium": ["chromium"],
1806
+ "firefox": ["firefox", "firefox-esr"],
1807
+ "google-chrome": ["chrome", "google-chrome"],
1808
+ "code": ["code", "vscode"],
1809
+ }}
1810
+ for pattern in patterns.get(app_name, [app_name]):
1811
+ if pattern.lower() in ps_output.lower():
1812
+ return True
1813
+ return False
1814
+
1815
+ def restart_app(app_name):
1816
+ log(f"Restarting {{app_name}}...")
1817
+ try:
1818
+ subprocess.run(
1819
+ ["sudo", "-u", "{config.username}", "systemctl", "--user", "restart", f"{{app_name}}.service"],
1820
+ timeout=30, capture_output=True
1821
+ )
1822
+ return True
1823
+ except Exception as e:
1824
+ log(f"Failed to restart {{app_name}}: {{e}}")
1825
+ return False
1826
+
1827
+ def check_mounts():
1828
+ try:
1829
+ with open("/etc/fstab", "r") as f:
1830
+ fstab = f.read()
1831
+ for line in fstab.split("\\n"):
1832
+ parts = line.split()
1833
+ if len(parts) >= 2 and parts[0].startswith("mount"):
1834
+ mp = parts[1]
1835
+ result = subprocess.run(["mountpoint", "-q", mp], capture_output=True)
1836
+ if result.returncode != 0:
1837
+ log(f"Mount {{mp}} not active, attempting remount...")
1838
+ subprocess.run(["mount", mp], capture_output=True)
1839
+ except Exception as e:
1840
+ log(f"Mount check failed: {{e}}")
1841
+
1842
+ def write_status(status):
1843
+ try:
1844
+ with open(STATUS_FILE, "w") as f:
1845
+ json.dump(status, f)
1846
+ except:
1847
+ pass
1848
+
1849
+ def main():
1850
+ log("CloneBox Monitor started")
1851
+
1852
+ while True:
1853
+ status = {{"timestamp": time.strftime("%Y-%m-%dT%H:%M:%S"), "apps": {{}}, "mounts_ok": True}}
1854
+
1855
+ # Check mounts
1856
+ check_mounts()
1857
+
1858
+ # Check apps (only if GUI session is active)
1859
+ if os.path.exists("/run/user/1000"):
1860
+ ps_output = get_running_processes()
1861
+ for app in REQUIRED_APPS:
1862
+ running = is_app_running(app, ps_output)
1863
+ status["apps"][app] = "running" if running else "stopped"
1864
+ # Don't auto-restart apps - user may have closed them intentionally
1865
+
1866
+ write_status(status)
1867
+ time.sleep(CHECK_INTERVAL)
1868
+
1869
+ if __name__ == "__main__":
1870
+ main()
1871
+ '''
1872
+ # Note: the bash monitor is already installed above; the legacy Python monitor_script is retained for reference but not installed
1873
+
1874
+ # Create logs disk for host access
1875
+ runcmd_lines.extend([
1876
+ " - mkdir -p /mnt/logs",
1877
+ " - truncate -s 1G /var/lib/libvirt/images/clonebox-logs.qcow2",
1878
+ " - mkfs.ext4 -F /var/lib/libvirt/images/clonebox-logs.qcow2",
1879
+ " - echo '/var/lib/libvirt/images/clonebox-logs.qcow2 /mnt/logs ext4 loop,defaults 0 0' >> /etc/fstab",
1880
+ " - mount -a",
1881
+ " - mkdir -p /mnt/logs/var/log",
1882
+ " - mkdir -p /mnt/logs/tmp",
1883
+ " - cp -r /var/log/clonebox*.log /mnt/logs/var/log/ 2>/dev/null || true",
1884
+ " - cp -r /tmp/*-error.log /mnt/logs/tmp/ 2>/dev/null || true",
1885
+ " - echo 'Logs disk mounted at /mnt/logs - accessible from host as /var/lib/libvirt/images/clonebox-logs.qcow2'",
1886
+ " - echo 'To view logs on host: sudo mount -o loop /var/lib/libvirt/images/clonebox-logs.qcow2 /mnt/clonebox-logs'",
1887
+ ])
1888
+
1889
+ # Add reboot command at the end if GUI is enabled
1890
+ if config.gui:
1891
+ runcmd_lines.append(" - echo 'Rebooting in 10 seconds to start GUI...'")
1892
+ runcmd_lines.append(" - sleep 10 && reboot")
1893
+
1894
+ runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
1895
+ bootcmd_yaml = "\n".join(mount_commands) if mount_commands else ""
1896
+ bootcmd_block = f"\nbootcmd:\n{bootcmd_yaml}\n" if bootcmd_yaml else ""
1897
+
1898
+ # power_state is intentionally left empty; the runcmd above issues an explicit reboot when GUI is enabled
1899
+ power_state_yaml = ""
1900
+
1901
+ user_data = f"""#cloud-config
1902
+ hostname: {config.name}
1903
+ manage_etc_hosts: true
1904
+
1905
+ # Default user
1906
+ users:
1907
+ - name: {config.username}
1908
+ sudo: ALL=(ALL) NOPASSWD:ALL
1909
+ shell: /bin/bash
1910
+ lock_passwd: false
1911
+ groups: sudo,adm,dialout,cdrom,floppy,audio,dip,video,plugdev,netdev,docker
1912
+ plain_text_passwd: {config.password}
1913
+
1914
+ # Allow password authentication
1915
+ ssh_pwauth: true
1916
+ chpasswd:
1917
+ expire: false
1918
+
1919
+ # Make sure root partition + filesystem grows to fill the qcow2 disk size
1920
+ growpart:
1921
+ mode: auto
1922
+ devices: ["/"]
1923
+ ignore_growroot_disabled: false
1924
+ resize_rootfs: true
1925
+
1926
+ # Update package cache and upgrade
1927
+ package_update: true
1928
+ package_upgrade: false
1929
+ {bootcmd_block}
1930
+
1931
+ # Install packages (cloud-init waits for completion before runcmd)
1932
+ packages:
1933
+ {packages_yaml}
1934
+
1935
+ # Run after packages are installed
1936
+ runcmd:
1937
+ {runcmd_yaml}
1938
+ {power_state_yaml}
1939
+
1940
+ final_message: "CloneBox VM is ready after $UPTIME seconds"
1941
+ """
1942
+ (cloudinit_dir / "user-data").write_text(user_data)
1943
+
1944
+ # Create ISO
1945
+ iso_path = vm_dir / "cloud-init.iso"
1946
+ subprocess.run(
1947
+ [
1948
+ "genisoimage",
1949
+ "-output",
1950
+ str(iso_path),
1951
+ "-volid",
1952
+ "cidata",
1953
+ "-joliet",
1954
+ "-rock",
1955
+ str(cloudinit_dir / "user-data"),
1956
+ str(cloudinit_dir / "meta-data"),
1957
+ ],
1958
+ check=True,
1959
+ capture_output=True,
1960
+ )
1961
+
1962
+ return iso_path
1963
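Because runcmd_yaml, bootcmd_yaml and packages_yaml are spliced into the template as pre-indented text, a quick validation of the rendered user-data can catch indentation mistakes before the ISO is built. A sketch, assuming PyYAML is available (it is not imported by this module):

    import yaml  # PyYAML, assumed installed for this check only

    rendered = (cloudinit_dir / "user-data").read_text()
    assert rendered.startswith("#cloud-config"), "cloud-init requires the #cloud-config header"
    yaml.safe_load(rendered)  # raises yaml.YAMLError if a spliced block broke the indentation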
+
1964
+ def start_vm(self, vm_name: str, open_viewer: bool = True, console=None) -> bool:
1965
+ """Start a VM and optionally open virt-viewer."""
1966
+
1967
+ def log(msg):
1968
+ if console:
1969
+ console.print(msg)
1970
+ else:
1971
+ print(msg)
1972
+
1973
+ try:
1974
+ vm = self.conn.lookupByName(vm_name)
1975
+ except libvirt.libvirtError:
1976
+ log(f"[red]❌ VM '{vm_name}' not found[/]")
1977
+ return False
1978
+
1979
+ if vm.isActive():
1980
+ log(f"[yellow]⚠️ VM '{vm_name}' is already running[/]")
1981
+ else:
1982
+ log(f"[cyan]🚀 Starting VM '{vm_name}'...[/]")
1983
+ vm.create()
1984
+ log("[green]✅ VM started![/]")
1985
+
1986
+ if open_viewer:
1987
+ log("[cyan]🖥️ Opening virt-viewer...[/]")
1988
+ subprocess.Popen(
1989
+ ["virt-viewer", "-c", self.conn_uri, vm_name],
1990
+ stdout=subprocess.DEVNULL,
1991
+ stderr=subprocess.DEVNULL,
1992
+ )
1993
+
1994
+ return True
1995
+
1996
+ def stop_vm(self, vm_name: str, force: bool = False, console=None) -> bool:
1997
+ """Stop a VM."""
1998
+
1999
+ def log(msg):
2000
+ if console:
2001
+ console.print(msg)
2002
+ else:
2003
+ print(msg)
2004
+
2005
+ try:
2006
+ vm = self.conn.lookupByName(vm_name)
2007
+ except libvirt.libvirtError:
2008
+ log(f"[red]❌ VM '{vm_name}' not found[/]")
2009
+ return False
2010
+
2011
+ if not vm.isActive():
2012
+ log(f"[yellow]⚠️ VM '{vm_name}' is not running[/]")
2013
+ return True
2014
+
2015
+ if force:
2016
+ log(f"[yellow]⚡ Force stopping VM '{vm_name}'...[/]")
2017
+ vm.destroy()
2018
+ else:
2019
+ log(f"[cyan]🛑 Shutting down VM '{vm_name}'...[/]")
2020
+ vm.shutdown()
2021
+
2022
+ log("[green]✅ VM stopped![/]")
2023
+ return True
2024
+
2025
+ def delete_vm(
2026
+ self,
2027
+ vm_name: str,
2028
+ delete_storage: bool = True,
2029
+ console=None,
2030
+ ignore_not_found: bool = False,
2031
+ ) -> bool:
2032
+ """Delete a VM and optionally its storage."""
2033
+
2034
+ def log(msg):
2035
+ if console:
2036
+ console.print(msg)
2037
+ else:
2038
+ print(msg)
2039
+
2040
+ try:
2041
+ vm = self.conn.lookupByName(vm_name)
2042
+ except libvirt.libvirtError:
2043
+ if ignore_not_found:
+ return True
+ log(f"[red]❌ VM '{vm_name}' not found[/]")
2044
+ return False
2045
+
2046
+ # Stop if running
2047
+ if vm.isActive():
2048
+ vm.destroy()
2049
+
2050
+ # Undefine
2051
+ vm.undefine()
2052
+ log(f"[green]✅ VM '{vm_name}' undefined[/]")
2053
+
2054
+ # Delete storage
2055
+ if delete_storage:
2056
+ vm_dir = self.get_images_dir() / vm_name
2057
+ if vm_dir.exists():
2058
+ import shutil
2059
+
2060
+ shutil.rmtree(vm_dir)
2061
+ log(f"[green]🗑️ Storage deleted: {vm_dir}[/]")
2062
+
2063
+ return True
2064
+
2065
+ def list_vms(self) -> list:
2066
+ """List all VMs."""
2067
+ vms = []
2068
+ for vm_id in self.conn.listDomainsID():
2069
+ vm = self.conn.lookupByID(vm_id)
2070
+ vms.append({"name": vm.name(), "state": "running", "uuid": vm.UUIDString()})
2071
+
2072
+ for name in self.conn.listDefinedDomains():
2073
+ vm = self.conn.lookupByName(name)
2074
+ vms.append({"name": name, "state": "stopped", "uuid": vm.UUIDString()})
2075
+
2076
+ return vms
2077
+
2078
+ def close(self):
2079
+ """Close libvirt connection."""
2080
+ if self.conn:
2081
+ self.conn.close()
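Taken together, the lifecycle methods above can be driven roughly as follows (a sketch; the class name is taken from the module docstring and the constructor arguments are omitted because they are defined elsewhere in the package):

    from clonebox.cloner import SelectiveVMCloner

    cloner = SelectiveVMCloner(...)                      # placeholder: construct as done elsewhere in the package
    cloner.start_vm("clonebox-vm", open_viewer=False)
    print(cloner.list_vms())                             # [{"name": ..., "state": "running" | "stopped", "uuid": ...}]
    cloner.stop_vm("clonebox-vm")                        # graceful shutdown; force=True destroys a hung VM
    cloner.delete_vm("clonebox-vm", delete_storage=True)
    cloner.close()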