clonebox 0.1.7.tar.gz → 0.1.9.tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- {clonebox-0.1.7 → clonebox-0.1.9}/PKG-INFO +1 -1
- {clonebox-0.1.7 → clonebox-0.1.9}/pyproject.toml +1 -1
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox/cli.py +530 -16
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox/cloner.py +192 -9
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox.egg-info/PKG-INFO +1 -1
- {clonebox-0.1.7 → clonebox-0.1.9}/tests/test_network.py +16 -4
- {clonebox-0.1.7 → clonebox-0.1.9}/LICENSE +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/README.md +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/setup.cfg +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox/__init__.py +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox/__main__.py +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox/detector.py +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox.egg-info/SOURCES.txt +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox.egg-info/dependency_links.txt +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox.egg-info/entry_points.txt +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox.egg-info/requires.txt +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/src/clonebox.egg-info/top_level.txt +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/tests/test_cli.py +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/tests/test_cloner.py +0 -0
- {clonebox-0.1.7 → clonebox-0.1.9}/tests/test_detector.py +0 -0
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "clonebox"
-version = "0.1.7"
+version = "0.1.9"
 description = "Clone your workstation environment to an isolated VM with selective apps, paths and services"
 readme = "README.md"
 license = {text = "Apache-2.0"}
src/clonebox/cli.py

@@ -488,6 +488,442 @@ def cmd_list(args):
     console.print(table)
 
 
+def cmd_status(args):
+    """Check VM installation status and health from workstation."""
+    import subprocess
+
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    # If name is a path, load config to get VM name
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print(f"[red]❌ Config not found: {config_file}[/]")
+            return
+
+    if not name:
+        # Try current directory
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print("[red]❌ No VM name specified and no .clonebox.yaml found[/]")
+            return
+
+    console.print(f"[bold cyan]📊 Checking VM status: {name}[/]\n")
+
+    # Check VM state
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domstate", name],
+            capture_output=True, text=True, timeout=5
+        )
+        vm_state = result.stdout.strip()
+
+        if "running" in vm_state.lower():
+            console.print(f"[green]✅ VM State: {vm_state}[/]")
+        elif "shut off" in vm_state.lower():
+            console.print(f"[yellow]⏸️ VM State: {vm_state}[/]")
+            console.print("[dim]Start with: clonebox start .[/]")
+            return
+        else:
+            console.print(f"[dim]VM State: {vm_state}[/]")
+    except subprocess.TimeoutExpired:
+        console.print("[red]❌ Timeout checking VM state[/]")
+        return
+    except Exception as e:
+        console.print(f"[red]❌ Error: {e}[/]")
+        return
+
+    # Get VM IP address
+    console.print("\n[bold]🔍 Checking VM network...[/]")
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domifaddr", name],
+            capture_output=True, text=True, timeout=10
+        )
+        if result.stdout.strip():
+            console.print(f"[dim]{result.stdout.strip()}[/]")
+            # Extract IP
+            for line in result.stdout.split('\n'):
+                if 'ipv4' in line.lower():
+                    parts = line.split()
+                    for p in parts:
+                        if '/' in p and '.' in p:
+                            ip = p.split('/')[0]
+                            console.print(f"[green]IP Address: {ip}[/]")
+                            break
+        else:
+            console.print("[yellow]⚠️ No IP address yet (VM may still be booting)[/]")
+    except Exception as e:
+        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")
+
+    # Check cloud-init status via console
+    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
+    try:
+        # Use virsh console to check - this is tricky, so we check for the ready file
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-ready"],"capture-output":true}}'],
+            capture_output=True, text=True, timeout=10
+        )
+        if "CloneBox VM ready" in result.stdout or result.returncode == 0:
+            console.print("[green]✅ Cloud-init: Complete[/]")
+        else:
+            console.print("[yellow]⏳ Cloud-init: Still running (packages installing)[/]")
+    except Exception:
+        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU agent may not be ready)[/]")
+
+    # Check health status if available
+    console.print("\n[bold]🏥 Health Check Status...[/]")
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-health-status"],"capture-output":true}}'],
+            capture_output=True, text=True, timeout=10
+        )
+        if "HEALTH_STATUS=OK" in result.stdout:
+            console.print("[green]✅ Health: All checks passed[/]")
+        elif "HEALTH_STATUS=FAILED" in result.stdout:
+            console.print("[red]❌ Health: Some checks failed[/]")
+        else:
+            console.print("[yellow]⏳ Health check not yet run[/]")
+    except Exception:
+        console.print("[dim]Health status: Not available yet[/]")
+
+    # Show useful commands
+    console.print("\n[bold]📋 Useful commands:[/]")
+    console.print(f" [cyan]virt-viewer --connect {conn_uri} {name}[/] # Open GUI")
+    console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/] # Console access")
+    console.print(" [dim]Inside VM:[/]")
+    console.print(" [cyan]cat /var/log/clonebox-health.log[/] # Full health report")
+    console.print(" [cyan]sudo cloud-init status[/] # Cloud-init status")
+    console.print(" [cyan]clonebox-health[/] # Re-run health check")
+
+    # Run full health check if requested
+    if getattr(args, "health", False):
+        console.print("\n[bold]🔄 Running full health check...[/]")
+        try:
+            result = subprocess.run(
+                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                 '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}'],
+                capture_output=True, text=True, timeout=60
+            )
+            console.print("[green]Health check triggered. View results with:[/]")
+            console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/]")
+            console.print(" Then run: [cyan]cat /var/log/clonebox-health.log[/]")
+        except Exception as e:
+            console.print(f"[yellow]⚠️ Could not trigger health check: {e}[/]")
+
+
+def cmd_export(args):
+    """Export VM and data for migration to another workstation."""
+    import subprocess
+    import tarfile
+    import shutil
+
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+    include_data = getattr(args, "include_data", False)
+    output = getattr(args, "output", None)
+
+    # If name is a path, load config
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print(f"[red]❌ Config not found: {config_file}[/]")
+            return
+
+    if not name:
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print("[red]❌ No VM name specified[/]")
+            return
+
+    console.print(f"[bold cyan]📦 Exporting VM: {name}[/]\n")
+
+    # Determine storage path
+    if user_session:
+        storage_base = Path.home() / ".local/share/libvirt/images"
+    else:
+        storage_base = Path("/var/lib/libvirt/images")
+
+    vm_dir = storage_base / name
+
+    if not vm_dir.exists():
+        console.print(f"[red]❌ VM storage not found: {vm_dir}[/]")
+        return
+
+    # Create export directory
+    export_name = output or f"{name}-export.tar.gz"
+    if not export_name.endswith(".tar.gz"):
+        export_name += ".tar.gz"
+
+    export_path = Path(export_name).resolve()
+    temp_dir = Path(f"/tmp/clonebox-export-{name}")
+
+    try:
+        # Clean up temp dir if exists
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+        temp_dir.mkdir(parents=True)
+
+        # Stop VM if running
+        console.print("[cyan]Stopping VM for export...[/]")
+        subprocess.run(
+            ["virsh", "--connect", conn_uri, "shutdown", name],
+            capture_output=True, timeout=30
+        )
+        import time
+        time.sleep(5)
+        subprocess.run(
+            ["virsh", "--connect", conn_uri, "destroy", name],
+            capture_output=True, timeout=10
+        )
+
+        # Export VM XML
+        console.print("[cyan]Exporting VM definition...[/]")
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "dumpxml", name],
+            capture_output=True, text=True, timeout=30
+        )
+        (temp_dir / "vm.xml").write_text(result.stdout)
+
+        # Copy disk image
+        console.print("[cyan]Copying disk image (this may take a while)...[/]")
+        disk_image = vm_dir / f"{name}.qcow2"
+        if disk_image.exists():
+            shutil.copy2(disk_image, temp_dir / "disk.qcow2")
+
+        # Copy cloud-init ISO
+        cloudinit_iso = vm_dir / "cloud-init.iso"
+        if cloudinit_iso.exists():
+            shutil.copy2(cloudinit_iso, temp_dir / "cloud-init.iso")
+
+        # Copy config file
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            shutil.copy2(config_file, temp_dir / ".clonebox.yaml")
+
+        # Copy .env file (without sensitive data warning)
+        env_file = Path.cwd() / ".env"
+        if env_file.exists():
+            shutil.copy2(env_file, temp_dir / ".env")
+
+        # Include shared data if requested
+        if include_data:
+            console.print("[cyan]Bundling shared data (browser profiles, configs)...[/]")
+            data_dir = temp_dir / "data"
+            data_dir.mkdir()
+
+            # Load config to get paths
+            if config_file.exists():
+                config = load_clonebox_config(config_file)
+                all_paths = config.get("paths", {}).copy()
+                all_paths.update(config.get("app_data_paths", {}))
+
+                for idx, (host_path, guest_path) in enumerate(all_paths.items()):
+                    host_p = Path(host_path)
+                    if host_p.exists():
+                        dest = data_dir / f"mount{idx}"
+                        console.print(f" [dim]Copying {host_path}...[/]")
+                        try:
+                            if host_p.is_dir():
+                                shutil.copytree(host_p, dest, symlinks=True,
+                                                ignore=shutil.ignore_patterns('*.pyc', '__pycache__', '.git'))
+                            else:
+                                shutil.copy2(host_p, dest)
+                        except Exception as e:
+                            console.print(f" [yellow]⚠️ Skipped {host_path}: {e}[/]")
+
+                # Save path mapping
+                import json
+                (data_dir / "paths.json").write_text(json.dumps(all_paths, indent=2))
+
+        # Create tarball
+        console.print(f"[cyan]Creating archive: {export_path}[/]")
+        with tarfile.open(export_path, "w:gz") as tar:
+            tar.add(temp_dir, arcname=name)
+
+        # Get size
+        size_mb = export_path.stat().st_size / 1024 / 1024
+
+        console.print(f"\n[bold green]✅ Export complete![/]")
+        console.print(f" File: [cyan]{export_path}[/]")
+        console.print(f" Size: [cyan]{size_mb:.1f} MB[/]")
+        console.print(f"\n[bold]To import on another workstation:[/]")
+        console.print(f" [cyan]clonebox import {export_path.name}[/]")
+
+    finally:
+        # Cleanup
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+
+        # Restart VM
+        console.print("\n[cyan]Restarting VM...[/]")
+        subprocess.run(
+            ["virsh", "--connect", conn_uri, "start", name],
+            capture_output=True, timeout=30
+        )
+
+
+def cmd_import(args):
+    """Import VM from export archive."""
+    import subprocess
+    import tarfile
+    import shutil
+
+    archive_path = Path(args.archive).resolve()
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    if not archive_path.exists():
+        console.print(f"[red]❌ Archive not found: {archive_path}[/]")
+        return
+
+    console.print(f"[bold cyan]📥 Importing VM from: {archive_path}[/]\n")
+
+    # Determine storage path
+    if user_session:
+        storage_base = Path.home() / ".local/share/libvirt/images"
+    else:
+        storage_base = Path("/var/lib/libvirt/images")
+
+    storage_base.mkdir(parents=True, exist_ok=True)
+
+    temp_dir = Path(f"/tmp/clonebox-import-{archive_path.stem}")
+
+    try:
+        # Extract archive
+        console.print("[cyan]Extracting archive...[/]")
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+        temp_dir.mkdir(parents=True)
+
+        with tarfile.open(archive_path, "r:gz") as tar:
+            tar.extractall(temp_dir)
+
+        # Find extracted VM directory
+        vm_dirs = list(temp_dir.iterdir())
+        if not vm_dirs:
+            console.print("[red]❌ Empty archive[/]")
+            return
+
+        extracted_dir = vm_dirs[0]
+        vm_name = extracted_dir.name
+
+        console.print(f"[cyan]VM Name: {vm_name}[/]")
+
+        # Create VM storage directory
+        vm_storage = storage_base / vm_name
+        if vm_storage.exists():
+            if not getattr(args, "replace", False):
+                console.print(f"[red]❌ VM '{vm_name}' already exists. Use --replace to overwrite.[/]")
+                return
+            shutil.rmtree(vm_storage)
+
+        vm_storage.mkdir(parents=True)
+
+        # Copy disk image
+        console.print("[cyan]Copying disk image...[/]")
+        disk_src = extracted_dir / "disk.qcow2"
+        if disk_src.exists():
+            shutil.copy2(disk_src, vm_storage / f"{vm_name}.qcow2")
+
+        # Copy cloud-init ISO
+        cloudinit_src = extracted_dir / "cloud-init.iso"
+        if cloudinit_src.exists():
+            shutil.copy2(cloudinit_src, vm_storage / "cloud-init.iso")
+
+        # Copy config files to current directory
+        config_src = extracted_dir / ".clonebox.yaml"
+        if config_src.exists():
+            shutil.copy2(config_src, Path.cwd() / ".clonebox.yaml")
+            console.print("[green]✅ Copied .clonebox.yaml[/]")
+
+        env_src = extracted_dir / ".env"
+        if env_src.exists():
+            shutil.copy2(env_src, Path.cwd() / ".env")
+            console.print("[green]✅ Copied .env[/]")
+
+        # Restore data if included
+        data_dir = extracted_dir / "data"
+        if data_dir.exists():
+            import json
+            paths_file = data_dir / "paths.json"
+            if paths_file.exists():
+                paths_mapping = json.loads(paths_file.read_text())
+                console.print("\n[cyan]Restoring shared data...[/]")
+
+                for idx, (host_path, guest_path) in enumerate(paths_mapping.items()):
+                    src = data_dir / f"mount{idx}"
+                    if src.exists():
+                        dest = Path(host_path)
+                        console.print(f" [dim]Restoring to {host_path}...[/]")
+                        try:
+                            if dest.exists():
+                                console.print(f" [yellow]⚠️ Skipped (already exists)[/]")
+                            else:
+                                dest.parent.mkdir(parents=True, exist_ok=True)
+                                if src.is_dir():
+                                    shutil.copytree(src, dest)
+                                else:
+                                    shutil.copy2(src, dest)
+                        except Exception as e:
+                            console.print(f" [yellow]⚠️ Error: {e}[/]")
+
+        # Modify and define VM XML
+        console.print("\n[cyan]Defining VM...[/]")
+        xml_src = extracted_dir / "vm.xml"
+        if xml_src.exists():
+            xml_content = xml_src.read_text()
+
+            # Update paths in XML to new storage location
+            # This is a simple replacement - may need more sophisticated handling
+            xml_content = xml_content.replace(
+                f"/home/", f"{Path.home()}/"
+            )
+
+            # Write modified XML
+            modified_xml = temp_dir / "vm-modified.xml"
+            modified_xml.write_text(xml_content)
+
+            # Define VM
+            result = subprocess.run(
+                ["virsh", "--connect", conn_uri, "define", str(modified_xml)],
+                capture_output=True, text=True, timeout=30
+            )
+
+            if result.returncode == 0:
+                console.print(f"[green]✅ VM '{vm_name}' defined successfully![/]")
+            else:
+                console.print(f"[yellow]⚠️ VM definition warning: {result.stderr}[/]")
+
+        console.print(f"\n[bold green]✅ Import complete![/]")
+        console.print(f"\n[bold]To start the VM:[/]")
+        console.print(f" [cyan]clonebox start . {'--user' if user_session else ''}[/]")
+
+    finally:
+        # Cleanup
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+
+
 CLONEBOX_CONFIG_FILE = ".clonebox.yaml"
 CLONEBOX_ENV_FILE = ".env"
 
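A note on the agent calls above: QGA's `guest-exec` returns only a PID, and the captured output has to be fetched with a follow-up `guest-exec-status` call, where it arrives base64-encoded. A minimal sketch of that two-step flow (the `qga` helper and the `myvm` domain name are hypothetical, not part of this diff):

```python
import base64
import json
import subprocess

def qga(conn_uri: str, vm: str, command: dict) -> dict:
    """Send one command to the QEMU guest agent via virsh and parse the JSON reply."""
    out = subprocess.run(
        ["virsh", "--connect", conn_uri, "qemu-agent-command", vm, json.dumps(command)],
        capture_output=True, text=True, timeout=10, check=True,
    )
    return json.loads(out.stdout)

# Start /bin/cat in the guest; only a PID comes back.
pid = qga("qemu:///session", "myvm", {
    "execute": "guest-exec",
    "arguments": {"path": "/bin/cat", "arg": ["/var/log/clonebox-ready"], "capture-output": True},
})["return"]["pid"]

# Poll for completion; stdout arrives base64-encoded in "out-data".
status = qga("qemu:///session", "myvm",
             {"execute": "guest-exec-status", "arguments": {"pid": pid}})["return"]
if status.get("exited"):
    print(base64.b64decode(status.get("out-data", "")).decode())
```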
@@ -602,6 +1038,18 @@ def generate_clonebox_yaml(
     if host_folder.exists() and str(host_folder) not in paths_mapping:
         paths_mapping[str(host_folder)] = guest_folder
 
+    # Detect and add app-specific data directories for running applications
+    # This includes browser profiles, IDE settings, credentials, extensions, etc.
+    app_data_dirs = detector.detect_app_data_dirs(snapshot.applications)
+    app_data_mapping = {}
+    for app_data in app_data_dirs:
+        host_path = app_data["path"]
+        if host_path not in paths_mapping:
+            # Map to same relative path in VM user home
+            rel_path = host_path.replace(str(home_dir), "").lstrip("/")
+            guest_path = f"/home/ubuntu/{rel_path}"
+            app_data_mapping[host_path] = guest_path
+
     # Determine VM name
     if not vm_name:
         if target_path:
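The mapping above keeps each detected app directory at the same location relative to the guest user's home. A quick illustration of the rewrite (paths are made up for the example):

```python
from pathlib import Path

home_dir = Path("/home/alice")
host_path = "/home/alice/.mozilla/firefox"

# Strip the host home prefix, then re-root under the guest user's home.
rel_path = host_path.replace(str(home_dir), "").lstrip("/")
guest_path = f"/home/ubuntu/{rel_path}"
assert guest_path == "/home/ubuntu/.mozilla/firefox"
```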
@@ -654,15 +1102,20 @@ def generate_clonebox_yaml(
         "snap_packages": all_snap_packages,
         "post_commands": [],  # User can add custom commands to run after setup
         "paths": paths_mapping,
+        "app_data_paths": app_data_mapping,  # App-specific config/data directories
         "detected": {
             "running_apps": [
-                {"name": a.name, "cwd": a.working_dir, "memory_mb": round(a.memory_mb)}
+                {"name": a.name, "cwd": a.working_dir or "", "memory_mb": round(a.memory_mb)}
                 for a in snapshot.applications[:10]
             ],
+            "app_data_dirs": [
+                {"path": d["path"], "app": d["app"], "size_mb": d["size_mb"]}
+                for d in app_data_dirs[:15]
+            ],
             "all_paths": {
-                "projects": paths_by_type["project"],
-                "configs": paths_by_type["config"][:5],
-                "data": paths_by_type["data"][:5],
+                "projects": list(paths_by_type["project"]),
+                "configs": list(paths_by_type["config"][:5]),
+                "data": list(paths_by_type["data"][:5]),
             },
         },
     }
@@ -692,7 +1145,7 @@ def load_clonebox_config(path: Path) -> dict:
     return config
 
 
-def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout: int =
+def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout: int = 900):
     """Monitor cloud-init status in VM and show progress."""
     import subprocess
     import time
@@ -746,15 +1199,17 @@ def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout:
             time.sleep(2)
             break
 
-        # Estimate remaining time
+        # Estimate remaining time (total ~12-15 minutes for full desktop install)
         if elapsed < 60:
-            remaining = "~
-        elif elapsed < 180:
-            remaining = f"~{8 - minutes} minutes"
+            remaining = "~12-15 minutes"
         elif elapsed < 300:
-            remaining = f"~{
+            remaining = f"~{12 - minutes} minutes"
+        elif elapsed < 600:
+            remaining = f"~{10 - minutes} minutes"
+        elif elapsed < 800:
+            remaining = "finishing soon..."
         else:
-            remaining = "
+            remaining = "almost done"
 
         if restart_detected:
             progress.update(task, description=f"[cyan]Starting GUI... ({minutes}m {seconds}s, {remaining})")
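The new 900-second default on `monitor_cloud_init_status` lines up with these revised estimates: 15 minutes is the upper end of the "~12-15 minutes" window now quoted for a full desktop install.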
@@ -781,13 +1236,17 @@ def create_vm_from_config(
     replace: bool = False,
 ) -> str:
     """Create VM from YAML config dict."""
+    # Merge paths and app_data_paths
+    all_paths = config.get("paths", {}).copy()
+    all_paths.update(config.get("app_data_paths", {}))
+
     vm_config = VMConfig(
         name=config["vm"]["name"],
         ram_mb=config["vm"].get("ram_mb", 4096),
         vcpus=config["vm"].get("vcpus", 4),
         gui=config["vm"].get("gui", True),
         base_image=config["vm"].get("base_image"),
-        paths=
+        paths=all_paths,
         packages=config.get("packages", []),
         snap_packages=config.get("snap_packages", []),
         services=config.get("services", []),
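Because `all_paths` is built with `dict.update`, an entry in `app_data_paths` wins if the same host path also appears in `paths`. A minimal illustration of that merge (the paths are invented for the example):

```python
config = {
    "paths": {"/home/alice/project": "/home/ubuntu/project"},
    "app_data_paths": {
        "/home/alice/project": "/home/ubuntu/override",      # duplicate key
        "/home/alice/.config/Code": "/home/ubuntu/.config/Code",
    },
}
all_paths = config.get("paths", {}).copy()
all_paths.update(config.get("app_data_paths", {}))

# The app_data_paths value replaced the original mapping for the duplicate key.
assert all_paths["/home/alice/project"] == "/home/ubuntu/override"
assert len(all_paths) == 2
```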
@@ -915,16 +1374,27 @@ def cmd_clone(args):
         password = config['vm'].get('password', 'ubuntu')
         console.print("\n[bold yellow]⏰ GUI Setup Process:[/]")
         console.print(" [yellow]•[/] Installing desktop environment (~5-10 minutes)")
+        console.print(" [yellow]•[/] Running health checks on all components")
         console.print(" [yellow]•[/] Automatic restart after installation")
         console.print(" [yellow]•[/] GUI login screen will appear")
         console.print(f" [yellow]•[/] Login: [cyan]{username}[/] / [cyan]{'*' * len(password)}[/] (from .env)")
         console.print("\n[dim]💡 Progress will be monitored automatically below[/]")
 
+        # Show health check info
+        console.print("\n[bold]📊 Health Check (inside VM):[/]")
+        console.print(" [cyan]cat /var/log/clonebox-health.log[/] # View full report")
+        console.print(" [cyan]cat /var/log/clonebox-health-status[/] # Quick status")
+        console.print(" [cyan]clonebox-health[/] # Re-run health check")
+
         # Show mount instructions
-
-
-
-
+        all_paths = config.get("paths", {}).copy()
+        all_paths.update(config.get("app_data_paths", {}))
+        if all_paths:
+            console.print("\n[bold]📁 Mounted paths (automatic):[/]")
+            for idx, (host, guest) in enumerate(list(all_paths.items())[:5]):
+                console.print(f" [dim]{host}[/] → [cyan]{guest}[/]")
+            if len(all_paths) > 5:
+                console.print(f" [dim]... and {len(all_paths) - 5} more paths[/]")
     except PermissionError as e:
         console.print(f"[red]❌ Permission Error:[/]\n{e}")
         console.print("\n[yellow]💡 Try running with --user flag:[/]")
@@ -1146,6 +1616,50 @@ def main():
     )
     clone_parser.set_defaults(func=cmd_clone)
 
+    # Status command - check VM health from workstation
+    status_parser = subparsers.add_parser("status", help="Check VM installation status and health")
+    status_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    status_parser.add_argument(
+        "-u",
+        "--user",
+        action="store_true",
+        help="Use user session (qemu:///session)",
+    )
+    status_parser.add_argument(
+        "--health", "-H", action="store_true", help="Run full health check"
+    )
+    status_parser.set_defaults(func=cmd_status)
+
+    # Export command - package VM for migration
+    export_parser = subparsers.add_parser("export", help="Export VM and data for migration")
+    export_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    export_parser.add_argument(
+        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
+    )
+    export_parser.add_argument(
+        "-o", "--output", help="Output archive filename (default: <vmname>-export.tar.gz)"
+    )
+    export_parser.add_argument(
+        "--include-data", "-d", action="store_true",
+        help="Include shared data (browser profiles, configs) in export"
+    )
+    export_parser.set_defaults(func=cmd_export)
+
+    # Import command - restore VM from export
+    import_parser = subparsers.add_parser("import", help="Import VM from export archive")
+    import_parser.add_argument("archive", help="Path to export archive (.tar.gz)")
+    import_parser.add_argument(
+        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
+    )
+    import_parser.add_argument(
+        "--replace", action="store_true", help="Replace existing VM if exists"
+    )
+    import_parser.set_defaults(func=cmd_import)
+
     args = parser.parse_args()
 
     if hasattr(args, "func"):
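Taken together, the new subparsers make invocations like the following available (flags exactly as defined above): `clonebox status . --health` to check a VM described by the local `.clonebox.yaml` and trigger a full health run, `clonebox export myvm -o myvm-backup --include-data` to produce `myvm-backup.tar.gz` with shared data bundled in, and `clonebox import myvm-backup.tar.gz --replace` to restore it, with `-u/--user` switching any of them to `qemu:///session`.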
src/clonebox/cloner.py

@@ -476,6 +476,179 @@ class SelectiveVMCloner:
 
         return ET.tostring(root, encoding="unicode")
 
+    def _generate_health_check_script(self, config: VMConfig) -> str:
+        """Generate a health check script that validates all installed components."""
+        import base64
+
+        # Build package check commands
+        apt_checks = []
+        for pkg in config.packages:
+            apt_checks.append(f'check_apt_package "{pkg}"')
+
+        snap_checks = []
+        for pkg in config.snap_packages:
+            snap_checks.append(f'check_snap_package "{pkg}"')
+
+        service_checks = []
+        for svc in config.services:
+            service_checks.append(f'check_service "{svc}"')
+
+        mount_checks = []
+        for idx, (host_path, guest_path) in enumerate(config.paths.items()):
+            mount_checks.append(f'check_mount "{guest_path}" "mount{idx}"')
+
+        apt_checks_str = "\n".join(apt_checks) if apt_checks else "echo 'No apt packages to check'"
+        snap_checks_str = "\n".join(snap_checks) if snap_checks else "echo 'No snap packages to check'"
+        service_checks_str = "\n".join(service_checks) if service_checks else "echo 'No services to check'"
+        mount_checks_str = "\n".join(mount_checks) if mount_checks else "echo 'No mounts to check'"
+
+        script = f'''#!/bin/bash
+# CloneBox Health Check Script
+# Generated automatically - validates all installed components
+
+REPORT_FILE="/var/log/clonebox-health.log"
+PASSED=0
+FAILED=0
+WARNINGS=0
+
+# Colors for output
+RED='\\033[0;31m'
+GREEN='\\033[0;32m'
+YELLOW='\\033[1;33m'
+NC='\\033[0m'
+
+log() {{
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$REPORT_FILE"
+}}
+
+check_apt_package() {{
+    local pkg="$1"
+    if dpkg -l "$pkg" 2>/dev/null | grep -q "^ii"; then
+        log "[PASS] APT package '$pkg' is installed"
+        ((PASSED++))
+        return 0
+    else
+        log "[FAIL] APT package '$pkg' is NOT installed"
+        ((FAILED++))
+        return 1
+    fi
+}}
+
+check_snap_package() {{
+    local pkg="$1"
+    if snap list "$pkg" &>/dev/null; then
+        log "[PASS] Snap package '$pkg' is installed"
+        ((PASSED++))
+        return 0
+    else
+        log "[FAIL] Snap package '$pkg' is NOT installed"
+        ((FAILED++))
+        return 1
+    fi
+}}
+
+check_service() {{
+    local svc="$1"
+    if systemctl is-enabled "$svc" &>/dev/null; then
+        if systemctl is-active "$svc" &>/dev/null; then
+            log "[PASS] Service '$svc' is enabled and running"
+            ((PASSED++))
+            return 0
+        else
+            log "[WARN] Service '$svc' is enabled but not running"
+            ((WARNINGS++))
+            return 1
+        fi
+    else
+        log "[INFO] Service '$svc' is not enabled (may be optional)"
+        return 0
+    fi
+}}
+
+check_mount() {{
+    local path="$1"
+    local tag="$2"
+    if mountpoint -q "$path" 2>/dev/null; then
+        log "[PASS] Mount '$path' ($tag) is active"
+        ((PASSED++))
+        return 0
+    elif [ -d "$path" ]; then
+        log "[WARN] Directory '$path' exists but not mounted"
+        ((WARNINGS++))
+        return 1
+    else
+        log "[INFO] Mount point '$path' does not exist yet"
+        return 0
+    fi
+}}
+
+check_gui() {{
+    if systemctl get-default | grep -q graphical; then
+        log "[PASS] System configured for graphical target"
+        ((PASSED++))
+        if systemctl is-active gdm3 &>/dev/null || systemctl is-active gdm &>/dev/null; then
+            log "[PASS] Display manager (GDM) is running"
+            ((PASSED++))
+        else
+            log "[WARN] Display manager not yet running (may start after reboot)"
+            ((WARNINGS++))
+        fi
+    else
+        log "[INFO] System not configured for GUI"
+    fi
+}}
+
+# Start health check
+log "=========================================="
+log "CloneBox Health Check Report"
+log "VM Name: {config.name}"
+log "Date: $(date)"
+log "=========================================="
+
+log ""
+log "--- APT Packages ---"
+{apt_checks_str}
+
+log ""
+log "--- Snap Packages ---"
+{snap_checks_str}
+
+log ""
+log "--- Services ---"
+{service_checks_str}
+
+log ""
+log "--- Mounts ---"
+{mount_checks_str}
+
+log ""
+log "--- GUI Status ---"
+check_gui
+
+log ""
+log "=========================================="
+log "Health Check Summary"
+log "=========================================="
+log "Passed: $PASSED"
+log "Failed: $FAILED"
+log "Warnings: $WARNINGS"
+
+if [ $FAILED -eq 0 ]; then
+    log ""
+    log "[SUCCESS] All critical checks passed!"
+    echo "HEALTH_STATUS=OK" > /var/log/clonebox-health-status
+    exit 0
+else
+    log ""
+    log "[ERROR] Some checks failed. Review log for details."
+    echo "HEALTH_STATUS=FAILED" > /var/log/clonebox-health-status
+    exit 1
+fi
+'''
+        # Encode script to base64 for safe embedding in cloud-init
+        encoded = base64.b64encode(script.encode()).decode()
+        return encoded
+
     def _create_cloudinit_iso(self, vm_dir: Path, config: VMConfig) -> Path:
         """Create cloud-init ISO with user-data and meta-data."""
 
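The method returns the script base64-encoded so it can travel through cloud-init's YAML and a shell `echo` without any quoting or escaping concerns. A standalone round-trip sketch of that encoding step (the sample script text is invented):

```python
import base64

script = "#!/bin/bash\nlog() { echo \"$1\"; }\nlog 'hello from the guest'\n"

encoded = base64.b64encode(script.encode()).decode()
# The base64 alphabet (A-Z, a-z, 0-9, +, /, =) contains no shell metacharacters,
# so the encoded form can sit safely inside single quotes in a runcmd line.
assert base64.b64decode(encoded).decode() == script
```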
@@ -486,15 +659,18 @@ class SelectiveVMCloner:
         meta_data = f"instance-id: {config.name}\nlocal-hostname: {config.name}\n"
         (cloudinit_dir / "meta-data").write_text(meta_data)
 
-        # Generate mount commands for 9p filesystems
+        # Generate mount commands and fstab entries for 9p filesystems
         mount_commands = []
+        fstab_entries = []
         for idx, (host_path, guest_path) in enumerate(config.paths.items()):
             if Path(host_path).exists():
                 tag = f"mount{idx}"
                 mount_commands.append(f" - mkdir -p {guest_path}")
                 mount_commands.append(
-                    f" - mount -t 9p -o trans=virtio,version=9p2000.L {tag} {guest_path}"
+                    f" - mount -t 9p -o trans=virtio,version=9p2000.L {tag} {guest_path} || true"
                 )
+                # Add fstab entry for persistence after reboot
+                fstab_entries.append(f"{tag} {guest_path} 9p trans=virtio,version=9p2000.L,nofail 0 0")
 
         # User-data
         # Add desktop environment if GUI is enabled
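The `nofail` option in the generated fstab entries matters here: if a shared host directory disappears or a 9p tag is absent at boot, the guest still comes up instead of dropping to emergency mode, mirroring the `|| true` added to the immediate mount command.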
@@ -517,7 +693,13 @@ class SelectiveVMCloner:
         for svc in config.services:
             runcmd_lines.append(f" - systemctl enable --now {svc} || true")
 
-        # Add mounts
+        # Add fstab entries for persistent mounts after reboot
+        if fstab_entries:
+            runcmd_lines.append(" - echo '# CloneBox 9p mounts' >> /etc/fstab")
+            for entry in fstab_entries:
+                runcmd_lines.append(f" - echo '{entry}' >> /etc/fstab")
+
+        # Add mounts (immediate, before reboot)
         for cmd in mount_commands:
             runcmd_lines.append(cmd)
 
@@ -540,16 +722,17 @@ class SelectiveVMCloner:
         for cmd in config.post_commands:
             runcmd_lines.append(f" - {cmd}")
 
-        #
-
-        runcmd_lines.append(" -
-        runcmd_lines.append(" -
+        # Generate health check script
+        health_script = self._generate_health_check_script(config)
+        runcmd_lines.append(f" - echo '{health_script}' | base64 -d > /usr/local/bin/clonebox-health")
+        runcmd_lines.append(" - chmod +x /usr/local/bin/clonebox-health")
+        runcmd_lines.append(" - /usr/local/bin/clonebox-health >> /var/log/clonebox-health.log 2>&1")
         runcmd_lines.append(" - echo 'CloneBox VM ready!' > /var/log/clonebox-ready")
-        runcmd_lines.append(" - echo 'Setup completed at:' $(date) >> /var/log/clonebox-setup.log")
 
         # Add reboot command at the end if GUI is enabled
         if config.gui:
-            runcmd_lines.append(" -
+            runcmd_lines.append(" - echo 'Rebooting in 10 seconds to start GUI...'")
+            runcmd_lines.append(" - sleep 10 && reboot")
 
         runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
 
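Installing the script as `echo '<base64>' | base64 -d` works because base64 output never contains single quotes or other characters that would break the quoting of the generated runcmd line; the decoded file is then made executable and run once, with its output appended to /var/log/clonebox-health.log.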
tests/test_network.py

@@ -50,10 +50,16 @@ class TestNetworkMode:
     @patch("clonebox.cloner.libvirt")
     def test_resolve_network_mode_auto_user_no_default(self, mock_libvirt):
         """Test auto mode with user session and no default network falls back to user."""
-
+        # Handle missing libvirt module in test environment
+        try:
+            import libvirt as real_libvirt
+            libvirt_error = real_libvirt.libvirtError
+        except ImportError:
+            class libvirt_error(Exception):
+                pass
 
         mock_conn = MagicMock()
-        mock_conn.networkLookupByName.side_effect =
+        mock_conn.networkLookupByName.side_effect = libvirt_error("No network")
         mock_libvirt.open.return_value = mock_conn
 
         cloner = SelectiveVMCloner(user_session=True)
@@ -126,10 +132,16 @@ class TestNetworkMode:
     @patch("clonebox.cloner.libvirt")
     def test_default_network_active_not_found(self, mock_libvirt):
         """Test _default_network_active returns False when network not found."""
-
+        # Handle missing libvirt module in test environment
+        try:
+            import libvirt as real_libvirt
+            libvirt_error = real_libvirt.libvirtError
+        except ImportError:
+            class libvirt_error(Exception):
+                pass
 
         mock_conn = MagicMock()
-        mock_conn.networkLookupByName.side_effect =
+        mock_conn.networkLookupByName.side_effect = libvirt_error("Not found")
        mock_libvirt.open.return_value = mock_conn
 
         cloner = SelectiveVMCloner()
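The locally defined fallback class is enough for both tests because Mock's `side_effect` raises whatever exception instance it is given when the mock is called; the tests only need `networkLookupByName` to raise something resembling a libvirt error, so they now run even on machines without the libvirt Python bindings installed.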