clonebox 0.1.8__tar.gz → 0.1.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {clonebox-0.1.8 → clonebox-0.1.9}/PKG-INFO +1 -1
- {clonebox-0.1.8 → clonebox-0.1.9}/pyproject.toml +1 -1
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox/cli.py +489 -7
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox/cloner.py +14 -4
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox.egg-info/PKG-INFO +1 -1
- {clonebox-0.1.8 → clonebox-0.1.9}/LICENSE +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/README.md +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/setup.cfg +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox/__init__.py +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox/__main__.py +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox/detector.py +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox.egg-info/SOURCES.txt +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox.egg-info/dependency_links.txt +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox.egg-info/entry_points.txt +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox.egg-info/requires.txt +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/src/clonebox.egg-info/top_level.txt +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/tests/test_cli.py +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/tests/test_cloner.py +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/tests/test_detector.py +0 -0
- {clonebox-0.1.8 → clonebox-0.1.9}/tests/test_network.py +0 -0
pyproject.toml:

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "clonebox"
-version = "0.1.8"
+version = "0.1.9"
 description = "Clone your workstation environment to an isolated VM with selective apps, paths and services"
 readme = "README.md"
 license = {text = "Apache-2.0"}
```
src/clonebox/cli.py:

```diff
@@ -488,6 +488,442 @@ def cmd_list(args):
     console.print(table)
 
 
+def cmd_status(args):
+    """Check VM installation status and health from workstation."""
+    import subprocess
+
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    # If name is a path, load config to get VM name
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print(f"[red]❌ Config not found: {config_file}[/]")
+            return
+
+    if not name:
+        # Try current directory
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print("[red]❌ No VM name specified and no .clonebox.yaml found[/]")
+            return
+
+    console.print(f"[bold cyan]📊 Checking VM status: {name}[/]\n")
+
+    # Check VM state
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domstate", name],
+            capture_output=True, text=True, timeout=5
+        )
+        vm_state = result.stdout.strip()
+
+        if "running" in vm_state.lower():
+            console.print(f"[green]✅ VM State: {vm_state}[/]")
+        elif "shut off" in vm_state.lower():
+            console.print(f"[yellow]⏸️ VM State: {vm_state}[/]")
+            console.print("[dim]Start with: clonebox start .[/]")
+            return
+        else:
+            console.print(f"[dim]VM State: {vm_state}[/]")
+    except subprocess.TimeoutExpired:
+        console.print("[red]❌ Timeout checking VM state[/]")
+        return
+    except Exception as e:
+        console.print(f"[red]❌ Error: {e}[/]")
+        return
+
+    # Get VM IP address
+    console.print("\n[bold]🔍 Checking VM network...[/]")
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domifaddr", name],
+            capture_output=True, text=True, timeout=10
+        )
+        if result.stdout.strip():
+            console.print(f"[dim]{result.stdout.strip()}[/]")
+            # Extract IP
+            for line in result.stdout.split('\n'):
+                if 'ipv4' in line.lower():
+                    parts = line.split()
+                    for p in parts:
+                        if '/' in p and '.' in p:
+                            ip = p.split('/')[0]
+                            console.print(f"[green]IP Address: {ip}[/]")
+                            break
+        else:
+            console.print("[yellow]⚠️ No IP address yet (VM may still be booting)[/]")
+    except Exception as e:
+        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")
+
+    # Check cloud-init status via console
+    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
+    try:
+        # Use virsh console to check - this is tricky, so we check for the ready file
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-ready"],"capture-output":true}}'],
+            capture_output=True, text=True, timeout=10
+        )
+        if "CloneBox VM ready" in result.stdout or result.returncode == 0:
+            console.print("[green]✅ Cloud-init: Complete[/]")
+        else:
+            console.print("[yellow]⏳ Cloud-init: Still running (packages installing)[/]")
+    except Exception:
+        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU agent may not be ready)[/]")
+
+    # Check health status if available
+    console.print("\n[bold]🏥 Health Check Status...[/]")
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-health-status"],"capture-output":true}}'],
+            capture_output=True, text=True, timeout=10
+        )
+        if "HEALTH_STATUS=OK" in result.stdout:
+            console.print("[green]✅ Health: All checks passed[/]")
+        elif "HEALTH_STATUS=FAILED" in result.stdout:
+            console.print("[red]❌ Health: Some checks failed[/]")
+        else:
+            console.print("[yellow]⏳ Health check not yet run[/]")
+    except Exception:
+        console.print("[dim]Health status: Not available yet[/]")
+
+    # Show useful commands
+    console.print("\n[bold]📋 Useful commands:[/]")
+    console.print(f" [cyan]virt-viewer --connect {conn_uri} {name}[/] # Open GUI")
+    console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/] # Console access")
+    console.print(" [dim]Inside VM:[/]")
+    console.print(" [cyan]cat /var/log/clonebox-health.log[/] # Full health report")
+    console.print(" [cyan]sudo cloud-init status[/] # Cloud-init status")
+    console.print(" [cyan]clonebox-health[/] # Re-run health check")
+
+    # Run full health check if requested
+    if getattr(args, "health", False):
+        console.print("\n[bold]🔄 Running full health check...[/]")
+        try:
+            result = subprocess.run(
+                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                 '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}'],
+                capture_output=True, text=True, timeout=60
+            )
+            console.print("[green]Health check triggered. View results with:[/]")
+            console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/]")
+            console.print(" Then run: [cyan]cat /var/log/clonebox-health.log[/]")
+        except Exception as e:
+            console.print(f"[yellow]⚠️ Could not trigger health check: {e}[/]")
+
+
+def cmd_export(args):
+    """Export VM and data for migration to another workstation."""
+    import subprocess
+    import tarfile
+    import shutil
+
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+    include_data = getattr(args, "include_data", False)
+    output = getattr(args, "output", None)
+
+    # If name is a path, load config
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print(f"[red]❌ Config not found: {config_file}[/]")
+            return
+
+    if not name:
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print("[red]❌ No VM name specified[/]")
+            return
+
+    console.print(f"[bold cyan]📦 Exporting VM: {name}[/]\n")
+
+    # Determine storage path
+    if user_session:
+        storage_base = Path.home() / ".local/share/libvirt/images"
+    else:
+        storage_base = Path("/var/lib/libvirt/images")
+
+    vm_dir = storage_base / name
+
+    if not vm_dir.exists():
+        console.print(f"[red]❌ VM storage not found: {vm_dir}[/]")
+        return
+
+    # Create export directory
+    export_name = output or f"{name}-export.tar.gz"
+    if not export_name.endswith(".tar.gz"):
+        export_name += ".tar.gz"
+
+    export_path = Path(export_name).resolve()
+    temp_dir = Path(f"/tmp/clonebox-export-{name}")
+
+    try:
+        # Clean up temp dir if exists
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+        temp_dir.mkdir(parents=True)
+
+        # Stop VM if running
+        console.print("[cyan]Stopping VM for export...[/]")
+        subprocess.run(
+            ["virsh", "--connect", conn_uri, "shutdown", name],
+            capture_output=True, timeout=30
+        )
+        import time
+        time.sleep(5)
+        subprocess.run(
+            ["virsh", "--connect", conn_uri, "destroy", name],
+            capture_output=True, timeout=10
+        )
+
+        # Export VM XML
+        console.print("[cyan]Exporting VM definition...[/]")
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "dumpxml", name],
+            capture_output=True, text=True, timeout=30
+        )
+        (temp_dir / "vm.xml").write_text(result.stdout)
+
+        # Copy disk image
+        console.print("[cyan]Copying disk image (this may take a while)...[/]")
+        disk_image = vm_dir / f"{name}.qcow2"
+        if disk_image.exists():
+            shutil.copy2(disk_image, temp_dir / "disk.qcow2")
+
+        # Copy cloud-init ISO
+        cloudinit_iso = vm_dir / "cloud-init.iso"
+        if cloudinit_iso.exists():
+            shutil.copy2(cloudinit_iso, temp_dir / "cloud-init.iso")
+
+        # Copy config file
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            shutil.copy2(config_file, temp_dir / ".clonebox.yaml")
+
+        # Copy .env file (without sensitive data warning)
+        env_file = Path.cwd() / ".env"
+        if env_file.exists():
+            shutil.copy2(env_file, temp_dir / ".env")
+
+        # Include shared data if requested
+        if include_data:
+            console.print("[cyan]Bundling shared data (browser profiles, configs)...[/]")
+            data_dir = temp_dir / "data"
+            data_dir.mkdir()
+
+            # Load config to get paths
+            if config_file.exists():
+                config = load_clonebox_config(config_file)
+                all_paths = config.get("paths", {}).copy()
+                all_paths.update(config.get("app_data_paths", {}))
+
+                for idx, (host_path, guest_path) in enumerate(all_paths.items()):
+                    host_p = Path(host_path)
+                    if host_p.exists():
+                        dest = data_dir / f"mount{idx}"
+                        console.print(f" [dim]Copying {host_path}...[/]")
+                        try:
+                            if host_p.is_dir():
+                                shutil.copytree(host_p, dest, symlinks=True,
+                                                ignore=shutil.ignore_patterns('*.pyc', '__pycache__', '.git'))
+                            else:
+                                shutil.copy2(host_p, dest)
+                        except Exception as e:
+                            console.print(f" [yellow]⚠️ Skipped {host_path}: {e}[/]")
+
+                # Save path mapping
+                import json
+                (data_dir / "paths.json").write_text(json.dumps(all_paths, indent=2))
+
+        # Create tarball
+        console.print(f"[cyan]Creating archive: {export_path}[/]")
+        with tarfile.open(export_path, "w:gz") as tar:
+            tar.add(temp_dir, arcname=name)
+
+        # Get size
+        size_mb = export_path.stat().st_size / 1024 / 1024
+
+        console.print(f"\n[bold green]✅ Export complete![/]")
+        console.print(f" File: [cyan]{export_path}[/]")
+        console.print(f" Size: [cyan]{size_mb:.1f} MB[/]")
+        console.print(f"\n[bold]To import on another workstation:[/]")
+        console.print(f" [cyan]clonebox import {export_path.name}[/]")
+
+    finally:
+        # Cleanup
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+
+        # Restart VM
+        console.print("\n[cyan]Restarting VM...[/]")
+        subprocess.run(
+            ["virsh", "--connect", conn_uri, "start", name],
+            capture_output=True, timeout=30
+        )
+
+
+def cmd_import(args):
+    """Import VM from export archive."""
+    import subprocess
+    import tarfile
+    import shutil
+
+    archive_path = Path(args.archive).resolve()
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    if not archive_path.exists():
+        console.print(f"[red]❌ Archive not found: {archive_path}[/]")
+        return
+
+    console.print(f"[bold cyan]📥 Importing VM from: {archive_path}[/]\n")
+
+    # Determine storage path
+    if user_session:
+        storage_base = Path.home() / ".local/share/libvirt/images"
+    else:
+        storage_base = Path("/var/lib/libvirt/images")
+
+    storage_base.mkdir(parents=True, exist_ok=True)
+
+    temp_dir = Path(f"/tmp/clonebox-import-{archive_path.stem}")
+
+    try:
+        # Extract archive
+        console.print("[cyan]Extracting archive...[/]")
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+        temp_dir.mkdir(parents=True)
+
+        with tarfile.open(archive_path, "r:gz") as tar:
+            tar.extractall(temp_dir)
+
+        # Find extracted VM directory
+        vm_dirs = list(temp_dir.iterdir())
+        if not vm_dirs:
+            console.print("[red]❌ Empty archive[/]")
+            return
+
+        extracted_dir = vm_dirs[0]
+        vm_name = extracted_dir.name
+
+        console.print(f"[cyan]VM Name: {vm_name}[/]")
+
+        # Create VM storage directory
+        vm_storage = storage_base / vm_name
+        if vm_storage.exists():
+            if not getattr(args, "replace", False):
+                console.print(f"[red]❌ VM '{vm_name}' already exists. Use --replace to overwrite.[/]")
+                return
+            shutil.rmtree(vm_storage)
+
+        vm_storage.mkdir(parents=True)
+
+        # Copy disk image
+        console.print("[cyan]Copying disk image...[/]")
+        disk_src = extracted_dir / "disk.qcow2"
+        if disk_src.exists():
+            shutil.copy2(disk_src, vm_storage / f"{vm_name}.qcow2")
+
+        # Copy cloud-init ISO
+        cloudinit_src = extracted_dir / "cloud-init.iso"
+        if cloudinit_src.exists():
+            shutil.copy2(cloudinit_src, vm_storage / "cloud-init.iso")
+
+        # Copy config files to current directory
+        config_src = extracted_dir / ".clonebox.yaml"
+        if config_src.exists():
+            shutil.copy2(config_src, Path.cwd() / ".clonebox.yaml")
+            console.print("[green]✅ Copied .clonebox.yaml[/]")
+
+        env_src = extracted_dir / ".env"
+        if env_src.exists():
+            shutil.copy2(env_src, Path.cwd() / ".env")
+            console.print("[green]✅ Copied .env[/]")
+
+        # Restore data if included
+        data_dir = extracted_dir / "data"
+        if data_dir.exists():
+            import json
+            paths_file = data_dir / "paths.json"
+            if paths_file.exists():
+                paths_mapping = json.loads(paths_file.read_text())
+                console.print("\n[cyan]Restoring shared data...[/]")
+
+                for idx, (host_path, guest_path) in enumerate(paths_mapping.items()):
+                    src = data_dir / f"mount{idx}"
+                    if src.exists():
+                        dest = Path(host_path)
+                        console.print(f" [dim]Restoring to {host_path}...[/]")
+                        try:
+                            if dest.exists():
+                                console.print(f" [yellow]⚠️ Skipped (already exists)[/]")
+                            else:
+                                dest.parent.mkdir(parents=True, exist_ok=True)
+                                if src.is_dir():
+                                    shutil.copytree(src, dest)
+                                else:
+                                    shutil.copy2(src, dest)
+                        except Exception as e:
+                            console.print(f" [yellow]⚠️ Error: {e}[/]")
+
+        # Modify and define VM XML
+        console.print("\n[cyan]Defining VM...[/]")
+        xml_src = extracted_dir / "vm.xml"
+        if xml_src.exists():
+            xml_content = xml_src.read_text()
+
+            # Update paths in XML to new storage location
+            # This is a simple replacement - may need more sophisticated handling
+            xml_content = xml_content.replace(
+                f"/home/", f"{Path.home()}/"
+            )
+
+            # Write modified XML
+            modified_xml = temp_dir / "vm-modified.xml"
+            modified_xml.write_text(xml_content)
+
+            # Define VM
+            result = subprocess.run(
+                ["virsh", "--connect", conn_uri, "define", str(modified_xml)],
+                capture_output=True, text=True, timeout=30
+            )
+
+            if result.returncode == 0:
+                console.print(f"[green]✅ VM '{vm_name}' defined successfully![/]")
+            else:
+                console.print(f"[yellow]⚠️ VM definition warning: {result.stderr}[/]")
+
+        console.print(f"\n[bold green]✅ Import complete![/]")
+        console.print(f"\n[bold]To start the VM:[/]")
+        console.print(f" [cyan]clonebox start . {'--user' if user_session else ''}[/]")
+
+    finally:
+        # Cleanup
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+
+
 CLONEBOX_CONFIG_FILE = ".clonebox.yaml"
 CLONEBOX_ENV_FILE = ".env"
 
```
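A note on the `qemu-agent-command` probes in `cmd_status` above: QGA's `guest-exec` returns only a PID, so the file contents being grepped for (e.g. `CloneBox VM ready`) arrive via a follow-up `guest-exec-status` call, base64-encoded — which is presumably why the check also falls back to `returncode == 0`. A minimal sketch of the two-step read, assuming the same `virsh` plumbing; the `guest_cat` helper name and VM identifiers are illustrative, not part of clonebox:

```python
import base64
import json
import subprocess
import time


def guest_cat(conn_uri: str, vm: str, path: str, timeout: int = 10) -> str:
    """Sketch: read a guest file via the QEMU guest agent (guest-exec + guest-exec-status)."""
    def qga(payload: dict) -> dict:
        # virsh prints the agent's JSON reply on stdout
        out = subprocess.run(
            ["virsh", "--connect", conn_uri, "qemu-agent-command", vm, json.dumps(payload)],
            capture_output=True, text=True, timeout=timeout, check=True,
        )
        return json.loads(out.stdout)

    # Step 1: spawn /bin/cat in the guest; guest-exec only returns a PID.
    pid = qga({"execute": "guest-exec",
               "arguments": {"path": "/bin/cat", "arg": [path],
                             "capture-output": True}})["return"]["pid"]

    # Step 2: poll guest-exec-status until the process exits, then decode stdout.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        status = qga({"execute": "guest-exec-status", "arguments": {"pid": pid}})["return"]
        if status.get("exited"):
            return base64.b64decode(status.get("out-data", "")).decode(errors="replace")
        time.sleep(0.5)
    raise TimeoutError(f"guest-exec pid {pid} did not exit")
```

With a helper like this, the readiness test could become `"CloneBox VM ready" in guest_cat(conn_uri, name, "/var/log/clonebox-ready")` rather than matching against the raw agent reply, which contains only the PID JSON.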
src/clonebox/cli.py:

```diff
@@ -709,7 +1145,7 @@ def load_clonebox_config(path: Path) -> dict:
     return config
 
 
-def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout: int =
+def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout: int = 900):
     """Monitor cloud-init status in VM and show progress."""
     import subprocess
     import time
@@ -763,15 +1199,17 @@ def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout:
             time.sleep(2)
             break
 
-        # Estimate remaining time
+        # Estimate remaining time (total ~12-15 minutes for full desktop install)
         if elapsed < 60:
-            remaining = "~
-        elif elapsed < 180:
-            remaining = f"~{8 - minutes} minutes"
+            remaining = "~12-15 minutes"
         elif elapsed < 300:
-            remaining = f"~{
+            remaining = f"~{12 - minutes} minutes"
+        elif elapsed < 600:
+            remaining = f"~{10 - minutes} minutes"
+        elif elapsed < 800:
+            remaining = "finishing soon..."
         else:
-            remaining = "
+            remaining = "almost done"
 
         if restart_detected:
             progress.update(task, description=f"[cyan]Starting GUI... ({minutes}m {seconds}s, {remaining})")
```
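The widened estimate brackets are easiest to sanity-check as a pure function. A small self-contained sketch (the `estimate_remaining` name is hypothetical, extracted from the branch logic above) with a few spot checks:

```python
def estimate_remaining(elapsed: int) -> str:
    """Piecewise estimate, assuming a ~12-15 minute full desktop install."""
    minutes = elapsed // 60
    if elapsed < 60:
        return "~12-15 minutes"
    elif elapsed < 300:
        return f"~{12 - minutes} minutes"
    elif elapsed < 600:
        return f"~{10 - minutes} minutes"
    elif elapsed < 800:
        return "finishing soon..."
    return "almost done"


assert estimate_remaining(30) == "~12-15 minutes"
assert estimate_remaining(240) == "~8 minutes"    # 4 min elapsed -> ~8 to go
assert estimate_remaining(540) == "~1 minutes"    # rough heuristic: no pluralization
assert estimate_remaining(700) == "finishing soon..."
```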
src/clonebox/cli.py:

```diff
@@ -1178,6 +1616,50 @@ def main():
     )
     clone_parser.set_defaults(func=cmd_clone)
 
+    # Status command - check VM health from workstation
+    status_parser = subparsers.add_parser("status", help="Check VM installation status and health")
+    status_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    status_parser.add_argument(
+        "-u",
+        "--user",
+        action="store_true",
+        help="Use user session (qemu:///session)",
+    )
+    status_parser.add_argument(
+        "--health", "-H", action="store_true", help="Run full health check"
+    )
+    status_parser.set_defaults(func=cmd_status)
+
+    # Export command - package VM for migration
+    export_parser = subparsers.add_parser("export", help="Export VM and data for migration")
+    export_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    export_parser.add_argument(
+        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
+    )
+    export_parser.add_argument(
+        "-o", "--output", help="Output archive filename (default: <vmname>-export.tar.gz)"
+    )
+    export_parser.add_argument(
+        "--include-data", "-d", action="store_true",
+        help="Include shared data (browser profiles, configs) in export"
+    )
+    export_parser.set_defaults(func=cmd_export)
+
+    # Import command - restore VM from export
+    import_parser = subparsers.add_parser("import", help="Import VM from export archive")
+    import_parser.add_argument("archive", help="Path to export archive (.tar.gz)")
+    import_parser.add_argument(
+        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
+    )
+    import_parser.add_argument(
+        "--replace", action="store_true", help="Replace existing VM if exists"
+    )
+    import_parser.set_defaults(func=cmd_import)
+
     args = parser.parse_args()
 
     if hasattr(args, "func"):
```
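All three new subcommands reuse the `set_defaults(func=...)` dispatch idiom already visible in the context lines: each subparser stores its handler on the parsed namespace, and `main()` calls `args.func(args)` when the attribute is present. A self-contained toy sketch of the same wiring (command and handler names are illustrative, not clonebox's):

```python
import argparse


def cmd_hello(args: argparse.Namespace) -> None:
    print(f"hello, {args.name}")


def main() -> None:
    parser = argparse.ArgumentParser(prog="toy")
    subparsers = parser.add_subparsers(dest="command")

    hello = subparsers.add_parser("hello", help="Say hello")
    hello.add_argument("name", nargs="?", default="world")
    hello.set_defaults(func=cmd_hello)  # handler travels with the namespace

    args = parser.parse_args()
    if hasattr(args, "func"):  # same guard cli.py uses
        args.func(args)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
```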
src/clonebox/cloner.py:

```diff
@@ -659,15 +659,18 @@ fi
     meta_data = f"instance-id: {config.name}\nlocal-hostname: {config.name}\n"
     (cloudinit_dir / "meta-data").write_text(meta_data)
 
-    # Generate mount commands for 9p filesystems
+    # Generate mount commands and fstab entries for 9p filesystems
     mount_commands = []
+    fstab_entries = []
     for idx, (host_path, guest_path) in enumerate(config.paths.items()):
         if Path(host_path).exists():
             tag = f"mount{idx}"
             mount_commands.append(f" - mkdir -p {guest_path}")
             mount_commands.append(
-                f" - mount -t 9p -o trans=virtio,version=9p2000.L {tag} {guest_path}"
+                f" - mount -t 9p -o trans=virtio,version=9p2000.L {tag} {guest_path} || true"
             )
+            # Add fstab entry for persistence after reboot
+            fstab_entries.append(f"{tag} {guest_path} 9p trans=virtio,version=9p2000.L,nofail 0 0")
 
     # User-data
     # Add desktop environment if GUI is enabled
```
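For a share mapped at index 0, say `/home/dev/project → /mnt/project` (example paths), the loop now emits both a one-shot mount command and a matching fstab line; `nofail` keeps a missing 9p share from blocking boot, mirroring the `|| true` on the immediate mount. A quick sketch of the generated strings:

```python
# Example values only; real tags/paths come from config.paths
idx, host_path, guest_path = 0, "/home/dev/project", "/mnt/project"
tag = f"mount{idx}"

mount_cmd = f" - mount -t 9p -o trans=virtio,version=9p2000.L {tag} {guest_path} || true"
fstab_entry = f"{tag} {guest_path} 9p trans=virtio,version=9p2000.L,nofail 0 0"

print(mount_cmd)    # -> " - mount -t 9p -o trans=virtio,version=9p2000.L mount0 /mnt/project || true"
print(fstab_entry)  # -> "mount0 /mnt/project 9p trans=virtio,version=9p2000.L,nofail 0 0"
```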
src/clonebox/cloner.py:

```diff
@@ -690,7 +693,13 @@ fi
     for svc in config.services:
         runcmd_lines.append(f" - systemctl enable --now {svc} || true")
 
-    # Add mounts
+    # Add fstab entries for persistent mounts after reboot
+    if fstab_entries:
+        runcmd_lines.append(" - echo '# CloneBox 9p mounts' >> /etc/fstab")
+        for entry in fstab_entries:
+            runcmd_lines.append(f" - echo '{entry}' >> /etc/fstab")
+
+    # Add mounts (immediate, before reboot)
     for cmd in mount_commands:
         runcmd_lines.append(cmd)
 
```
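Putting the last two hunks together, the `runcmd` section of the generated user-data gains roughly the following for a single share (a hand-assembled sketch with example paths, not verbatim tool output):

```python
# Approximate user-data fragment for one 9p share: fstab entries are appended
# first (for persistence across the GUI reboot), then the immediate mounts run.
runcmd_fragment = """\
runcmd:
 - echo '# CloneBox 9p mounts' >> /etc/fstab
 - echo 'mount0 /mnt/project 9p trans=virtio,version=9p2000.L,nofail 0 0' >> /etc/fstab
 - mkdir -p /mnt/project
 - mount -t 9p -o trans=virtio,version=9p2000.L mount0 /mnt/project || true
"""
print(runcmd_fragment)
```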
src/clonebox/cloner.py:

```diff
@@ -722,7 +731,8 @@ fi
 
     # Add reboot command at the end if GUI is enabled
     if config.gui:
-        runcmd_lines.append(" -
+        runcmd_lines.append(" - echo 'Rebooting in 10 seconds to start GUI...'")
+        runcmd_lines.append(" - sleep 10 && reboot")
 
     runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
 
```