clonebox 0.1.8-py3-none-any.whl → 0.1.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clonebox/cli.py CHANGED
@@ -445,7 +445,72 @@ def cmd_start(args):
         return
 
     cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
-    cloner.start_vm(name, open_viewer=not args.no_viewer, console=console)
+    open_viewer = getattr(args, "viewer", False) or not getattr(args, "no_viewer", False)
+    cloner.start_vm(name, open_viewer=open_viewer, console=console)
+
+
+def cmd_open(args):
+    """Open VM viewer window."""
+    import subprocess
+
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    # If name is a path, load config
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print(f"[red]❌ Config not found: {config_file}[/]")
+            return
+    elif name == "." or not name:
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print("[red]❌ No VM name specified and no .clonebox.yaml in current directory[/]")
+            console.print("[dim]Usage: clonebox open <vm-name> or clonebox open .[/]")
+            return
+
+    # Check if VM is running
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domstate", name],
+            capture_output=True, text=True, timeout=10
+        )
+        state = result.stdout.strip()
+
+        if state != "running":
+            console.print(f"[yellow]⚠️ VM '{name}' is not running (state: {state})[/]")
+            if questionary.confirm(
+                f"Start VM '{name}' and open viewer?", default=True, style=custom_style
+            ).ask():
+                cloner = SelectiveVMCloner(user_session=user_session)
+                cloner.start_vm(name, open_viewer=True, console=console)
+            else:
+                console.print("[dim]Use 'clonebox start' to start the VM first.[/]")
+            return
+    except Exception as e:
+        console.print(f"[red]❌ Error checking VM state: {e}[/]")
+        return
+
+    # Open virt-viewer
+    console.print(f"[cyan]Opening viewer for VM: {name}[/]")
+    try:
+        subprocess.run(
+            ["virt-viewer", "--connect", conn_uri, name],
+            check=True
+        )
+    except FileNotFoundError:
+        console.print("[red]❌ virt-viewer not found[/]")
+        console.print("Install with: sudo apt install virt-viewer")
+    except subprocess.CalledProcessError as e:
+        console.print(f"[red]❌ Failed to open viewer: {e}[/]")
 
 
 def cmd_stop(args):
@@ -488,6 +553,653 @@ def cmd_list(args):
     console.print(table)
 
 
+def cmd_status(args):
+    """Check VM installation status and health from workstation."""
+    import subprocess
+
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    # If name is a path, load config to get VM name
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print(f"[red]❌ Config not found: {config_file}[/]")
+            return
+
+    if not name:
+        # Try current directory
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print("[red]❌ No VM name specified and no .clonebox.yaml found[/]")
+            return
+
+    console.print(f"[bold cyan]📊 Checking VM status: {name}[/]\n")
+
+    # Check VM state
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domstate", name],
+            capture_output=True, text=True, timeout=5
+        )
+        vm_state = result.stdout.strip()
+
+        if "running" in vm_state.lower():
+            console.print(f"[green]✅ VM State: {vm_state}[/]")
+        elif "shut off" in vm_state.lower():
+            console.print(f"[yellow]⏸️ VM State: {vm_state}[/]")
+            console.print("[dim]Start with: clonebox start .[/]")
+            return
+        else:
+            console.print(f"[dim]VM State: {vm_state}[/]")
+    except subprocess.TimeoutExpired:
+        console.print("[red]❌ Timeout checking VM state[/]")
+        return
+    except Exception as e:
+        console.print(f"[red]❌ Error: {e}[/]")
+        return
+
+    # Get VM IP address
+    console.print("\n[bold]🔍 Checking VM network...[/]")
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domifaddr", name],
+            capture_output=True, text=True, timeout=10
+        )
+        if result.stdout.strip():
+            console.print(f"[dim]{result.stdout.strip()}[/]")
+            # Extract IP
+            for line in result.stdout.split('\n'):
+                if 'ipv4' in line.lower():
+                    parts = line.split()
+                    for p in parts:
+                        if '/' in p and '.' in p:
+                            ip = p.split('/')[0]
+                            console.print(f"[green]IP Address: {ip}[/]")
+                            break
+        else:
+            console.print("[yellow]⚠️ No IP address yet (VM may still be booting)[/]")
+    except Exception as e:
+        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")
+
+    # Check cloud-init status via the QEMU guest agent
+    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
+    try:
+        # Checking via virsh console is tricky, so look for the ready file instead
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-ready"],"capture-output":true}}'],
+            capture_output=True, text=True, timeout=10
+        )
+        if "CloneBox VM ready" in result.stdout or result.returncode == 0:
+            console.print("[green]✅ Cloud-init: Complete[/]")
+        else:
+            console.print("[yellow]⏳ Cloud-init: Still running (packages installing)[/]")
+    except Exception:
+        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU agent may not be ready)[/]")
+
+    # Check health status if available
+    console.print("\n[bold]🏥 Health Check Status...[/]")
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-health-status"],"capture-output":true}}'],
+            capture_output=True, text=True, timeout=10
+        )
+        if "HEALTH_STATUS=OK" in result.stdout:
+            console.print("[green]✅ Health: All checks passed[/]")
+        elif "HEALTH_STATUS=FAILED" in result.stdout:
+            console.print("[red]❌ Health: Some checks failed[/]")
+        else:
+            console.print("[yellow]⏳ Health check not yet run[/]")
+    except Exception:
+        console.print("[dim]Health status: Not available yet[/]")
+
+    # Show useful commands
+    console.print("\n[bold]📋 Useful commands:[/]")
+    console.print(f"  [cyan]virt-viewer --connect {conn_uri} {name}[/]  # Open GUI")
+    console.print(f"  [cyan]virsh --connect {conn_uri} console {name}[/]  # Console access")
+    console.print("  [dim]Inside VM:[/]")
+    console.print("  [cyan]cat /var/log/clonebox-health.log[/]  # Full health report")
+    console.print("  [cyan]sudo cloud-init status[/]  # Cloud-init status")
+    console.print("  [cyan]clonebox-health[/]  # Re-run health check")
+
+    # Run full health check if requested
+    if getattr(args, "health", False):
+        console.print("\n[bold]🔄 Running full health check...[/]")
+        try:
+            result = subprocess.run(
+                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                 '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}'],
+                capture_output=True, text=True, timeout=60
+            )
+            console.print("[green]Health check triggered. View results with:[/]")
+            console.print(f"  [cyan]virsh --connect {conn_uri} console {name}[/]")
+            console.print("  Then run: [cyan]cat /var/log/clonebox-health.log[/]")
+        except Exception as e:
+            console.print(f"[yellow]⚠️ Could not trigger health check: {e}[/]")
+
+
+def cmd_export(args):
+    """Export VM and data for migration to another workstation."""
+    import subprocess
+    import tarfile
+    import shutil
+
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+    include_data = getattr(args, "include_data", False)
+    output = getattr(args, "output", None)
+
+    # If name is a path, load config
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print(f"[red]❌ Config not found: {config_file}[/]")
+            return
+
+    if not name:
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            name = config["vm"]["name"]
+        else:
+            console.print("[red]❌ No VM name specified[/]")
+            return
+
+    console.print(f"[bold cyan]📦 Exporting VM: {name}[/]\n")
+
+    # Determine storage path
+    if user_session:
+        storage_base = Path.home() / ".local/share/libvirt/images"
+    else:
+        storage_base = Path("/var/lib/libvirt/images")
+
+    vm_dir = storage_base / name
+
+    if not vm_dir.exists():
+        console.print(f"[red]❌ VM storage not found: {vm_dir}[/]")
+        return
+
+    # Create export archive name
+    export_name = output or f"{name}-export.tar.gz"
+    if not export_name.endswith(".tar.gz"):
+        export_name += ".tar.gz"
+
+    export_path = Path(export_name).resolve()
+    temp_dir = Path(f"/tmp/clonebox-export-{name}")
+
+    try:
+        # Clean up temp dir if it exists
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+        temp_dir.mkdir(parents=True)
+
+        # Stop VM if running
+        console.print("[cyan]Stopping VM for export...[/]")
+        subprocess.run(
+            ["virsh", "--connect", conn_uri, "shutdown", name],
+            capture_output=True, timeout=30
+        )
+        import time
+        time.sleep(5)
+        subprocess.run(
+            ["virsh", "--connect", conn_uri, "destroy", name],
+            capture_output=True, timeout=10
+        )
+
+        # Export VM XML
+        console.print("[cyan]Exporting VM definition...[/]")
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "dumpxml", name],
+            capture_output=True, text=True, timeout=30
+        )
+        (temp_dir / "vm.xml").write_text(result.stdout)
+
+        # Copy disk image
+        console.print("[cyan]Copying disk image (this may take a while)...[/]")
+        disk_image = vm_dir / f"{name}.qcow2"
+        if disk_image.exists():
+            shutil.copy2(disk_image, temp_dir / "disk.qcow2")
+
+        # Copy cloud-init ISO
+        cloudinit_iso = vm_dir / "cloud-init.iso"
+        if cloudinit_iso.exists():
+            shutil.copy2(cloudinit_iso, temp_dir / "cloud-init.iso")
+
+        # Copy config file
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            shutil.copy2(config_file, temp_dir / ".clonebox.yaml")
+
+        # Copy .env file (note: may contain sensitive data)
+        env_file = Path.cwd() / ".env"
+        if env_file.exists():
+            shutil.copy2(env_file, temp_dir / ".env")
+
+        # Include shared data if requested
+        if include_data:
+            console.print("[cyan]Bundling shared data (browser profiles, configs)...[/]")
+            data_dir = temp_dir / "data"
+            data_dir.mkdir()
+
+            # Load config to get paths
+            if config_file.exists():
+                config = load_clonebox_config(config_file)
+                all_paths = config.get("paths", {}).copy()
+                all_paths.update(config.get("app_data_paths", {}))
+
+                for idx, (host_path, guest_path) in enumerate(all_paths.items()):
+                    host_p = Path(host_path)
+                    if host_p.exists():
+                        dest = data_dir / f"mount{idx}"
+                        console.print(f"  [dim]Copying {host_path}...[/]")
+                        try:
+                            if host_p.is_dir():
+                                shutil.copytree(host_p, dest, symlinks=True,
+                                                ignore=shutil.ignore_patterns('*.pyc', '__pycache__', '.git'))
+                            else:
+                                shutil.copy2(host_p, dest)
+                        except Exception as e:
+                            console.print(f"  [yellow]⚠️ Skipped {host_path}: {e}[/]")
+
+                # Save path mapping
+                import json
+                (data_dir / "paths.json").write_text(json.dumps(all_paths, indent=2))
+
+        # Create tarball
+        console.print(f"[cyan]Creating archive: {export_path}[/]")
+        with tarfile.open(export_path, "w:gz") as tar:
+            tar.add(temp_dir, arcname=name)
+
+        # Get size
+        size_mb = export_path.stat().st_size / 1024 / 1024
+
+        console.print("\n[bold green]✅ Export complete![/]")
+        console.print(f"  File: [cyan]{export_path}[/]")
+        console.print(f"  Size: [cyan]{size_mb:.1f} MB[/]")
+        console.print("\n[bold]To import on another workstation:[/]")
+        console.print(f"  [cyan]clonebox import {export_path.name}[/]")
+
+    finally:
+        # Cleanup
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+
+        # Restart VM
+        console.print("\n[cyan]Restarting VM...[/]")
+        subprocess.run(
+            ["virsh", "--connect", conn_uri, "start", name],
+            capture_output=True, timeout=30
+        )
+
+
+def cmd_import(args):
+    """Import VM from export archive."""
+    import subprocess
+    import tarfile
+    import shutil
+
+    archive_path = Path(args.archive).resolve()
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    if not archive_path.exists():
+        console.print(f"[red]❌ Archive not found: {archive_path}[/]")
+        return
+
+    console.print(f"[bold cyan]📥 Importing VM from: {archive_path}[/]\n")
+
+    # Determine storage path
+    if user_session:
+        storage_base = Path.home() / ".local/share/libvirt/images"
+    else:
+        storage_base = Path("/var/lib/libvirt/images")
+
+    storage_base.mkdir(parents=True, exist_ok=True)
+
+    temp_dir = Path(f"/tmp/clonebox-import-{archive_path.stem}")
+
+    try:
+        # Extract archive
+        console.print("[cyan]Extracting archive...[/]")
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+        temp_dir.mkdir(parents=True)
+
+        with tarfile.open(archive_path, "r:gz") as tar:
+            tar.extractall(temp_dir)
+
+        # Find extracted VM directory
+        vm_dirs = list(temp_dir.iterdir())
+        if not vm_dirs:
+            console.print("[red]❌ Empty archive[/]")
+            return
+
+        extracted_dir = vm_dirs[0]
+        vm_name = extracted_dir.name
+
+        console.print(f"[cyan]VM Name: {vm_name}[/]")
+
+        # Create VM storage directory
+        vm_storage = storage_base / vm_name
+        if vm_storage.exists():
+            if not getattr(args, "replace", False):
+                console.print(f"[red]❌ VM '{vm_name}' already exists. Use --replace to overwrite.[/]")
+                return
+            shutil.rmtree(vm_storage)
+
+        vm_storage.mkdir(parents=True)
+
+        # Copy disk image
+        console.print("[cyan]Copying disk image...[/]")
+        disk_src = extracted_dir / "disk.qcow2"
+        if disk_src.exists():
+            shutil.copy2(disk_src, vm_storage / f"{vm_name}.qcow2")
+
+        # Copy cloud-init ISO
+        cloudinit_src = extracted_dir / "cloud-init.iso"
+        if cloudinit_src.exists():
+            shutil.copy2(cloudinit_src, vm_storage / "cloud-init.iso")
+
+        # Copy config files to current directory
+        config_src = extracted_dir / ".clonebox.yaml"
+        if config_src.exists():
+            shutil.copy2(config_src, Path.cwd() / ".clonebox.yaml")
+            console.print("[green]✅ Copied .clonebox.yaml[/]")
+
+        env_src = extracted_dir / ".env"
+        if env_src.exists():
+            shutil.copy2(env_src, Path.cwd() / ".env")
+            console.print("[green]✅ Copied .env[/]")
+
+        # Restore data if included
+        data_dir = extracted_dir / "data"
+        if data_dir.exists():
+            import json
+            paths_file = data_dir / "paths.json"
+            if paths_file.exists():
+                paths_mapping = json.loads(paths_file.read_text())
+                console.print("\n[cyan]Restoring shared data...[/]")
+
+                for idx, (host_path, guest_path) in enumerate(paths_mapping.items()):
+                    src = data_dir / f"mount{idx}"
+                    if src.exists():
+                        dest = Path(host_path)
+                        console.print(f"  [dim]Restoring to {host_path}...[/]")
+                        try:
+                            if dest.exists():
+                                console.print("  [yellow]⚠️ Skipped (already exists)[/]")
+                            else:
+                                dest.parent.mkdir(parents=True, exist_ok=True)
+                                if src.is_dir():
+                                    shutil.copytree(src, dest)
+                                else:
+                                    shutil.copy2(src, dest)
+                        except Exception as e:
+                            console.print(f"  [yellow]⚠️ Error: {e}[/]")
+
+        # Modify and define VM XML
+        console.print("\n[cyan]Defining VM...[/]")
+        xml_src = extracted_dir / "vm.xml"
+        if xml_src.exists():
+            xml_content = xml_src.read_text()
+
+            # Update paths in XML to the new storage location.
+            # This is a simple replacement - may need more sophisticated handling.
+            xml_content = xml_content.replace("/home/", f"{Path.home()}/")
+
+            # Write modified XML
+            modified_xml = temp_dir / "vm-modified.xml"
+            modified_xml.write_text(xml_content)
+
+            # Define VM
+            result = subprocess.run(
+                ["virsh", "--connect", conn_uri, "define", str(modified_xml)],
+                capture_output=True, text=True, timeout=30
+            )
+
+            if result.returncode == 0:
+                console.print(f"[green]✅ VM '{vm_name}' defined successfully![/]")
+            else:
+                console.print(f"[yellow]⚠️ VM definition warning: {result.stderr}[/]")
+
+        console.print("\n[bold green]✅ Import complete![/]")
+        console.print("\n[bold]To start the VM:[/]")
+        console.print(f"  [cyan]clonebox start . {'--user' if user_session else ''}[/]")
+
+    finally:
+        # Cleanup
+        if temp_dir.exists():
+            shutil.rmtree(temp_dir)
+
+
+def cmd_test(args):
+    """Test VM configuration and health."""
+    import subprocess
+    import json
+
+    name = args.name
+    user_session = getattr(args, "user", False)
+    quick = getattr(args, "quick", False)
+    verbose = getattr(args, "verbose", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    # If name is a path, load config
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if not config_file.exists():
+            console.print(f"[red]❌ Config not found: {config_file}[/]")
+            return
+    else:
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if not config_file.exists():
+            console.print("[red]❌ No .clonebox.yaml found in current directory[/]")
+            return
+
+    console.print(f"[bold cyan]🧪 Testing VM configuration: {config_file}[/]\n")
+
+    # Load config
+    try:
+        config = load_clonebox_config(config_file)
+        vm_name = config["vm"]["name"]
+        console.print("[green]✅ Config loaded successfully[/]")
+        console.print(f"  VM Name: {vm_name}")
+        console.print(f"  RAM: {config['vm']['ram_mb']}MB")
+        console.print(f"  vCPUs: {config['vm']['vcpus']}")
+        console.print(f"  GUI: {'Yes' if config['vm']['gui'] else 'No'}")
+    except Exception as e:
+        console.print(f"[red]❌ Failed to load config: {e}[/]")
+        return
+
+    console.print()
+
+    # Test 1: Check VM exists
+    console.print("[bold]1. VM Existence Check[/]")
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "dominfo", vm_name],
+            capture_output=True, text=True, timeout=10
+        )
+        if result.returncode == 0:
+            console.print("[green]✅ VM is defined in libvirt[/]")
+            if verbose:
+                for line in result.stdout.split('\n'):
+                    if ':' in line:
+                        console.print(f"  {line}")
+        else:
+            console.print("[red]❌ VM not found in libvirt[/]")
+            console.print("  Run: clonebox create .clonebox.yaml --start")
+            return
+    except Exception as e:
+        console.print(f"[red]❌ Error checking VM: {e}[/]")
+        return
+
+    console.print()
+
+    # Test 2: Check VM state
+    console.print("[bold]2. VM State Check[/]")
+    state = ""
+    try:
+        result = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domstate", vm_name],
+            capture_output=True, text=True, timeout=10
+        )
+        state = result.stdout.strip()
+        if state == "running":
+            console.print("[green]✅ VM is running[/]")
+
+            # Test network if running
+            console.print("\n  Checking network...")
+            try:
+                result = subprocess.run(
+                    ["virsh", "--connect", conn_uri, "domifaddr", vm_name],
+                    capture_output=True, text=True, timeout=10
+                )
+                if "192.168" in result.stdout or "10.0" in result.stdout:
+                    console.print("[green]✅ VM has network access[/]")
+                    if verbose:
+                        for line in result.stdout.split('\n'):
+                            if '192.168' in line or '10.0' in line:
+                                console.print(f"  IP: {line.split()[-1]}")
+                else:
+                    console.print("[yellow]⚠️ No IP address detected[/]")
+            except Exception:
+                console.print("[yellow]⚠️ Could not check network[/]")
+        else:
+            console.print(f"[yellow]⚠️ VM is not running (state: {state})[/]")
+            console.print("  Run: clonebox start .")
+    except Exception as e:
+        console.print(f"[red]❌ Error checking VM state: {e}[/]")
+
+    console.print()
+
+    # Test 3: Check cloud-init status (if running)
+    if not quick and state == "running":
+        console.print("[bold]3. Cloud-init Status[/]")
+        try:
+            # Try to get cloud-init status via QEMU guest agent
+            result = subprocess.run(
+                ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
+                 '{"execute":"guest-exec","arguments":{"path":"cloud-init","arg":["status"],"capture-output":true}}'],
+                capture_output=True, text=True, timeout=15
+            )
+            if result.returncode == 0:
+                try:
+                    response = json.loads(result.stdout)
+                    if "return" in response:
+                        pid = response["return"]["pid"]
+                        # Get output
+                        result2 = subprocess.run(
+                            ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
+                             f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
+                            capture_output=True, text=True, timeout=15
+                        )
+                        if result2.returncode == 0:
+                            resp2 = json.loads(result2.stdout)
+                            if "return" in resp2 and resp2["return"]["exited"]:
+                                output = resp2["return"]["out-data"]
+                                if output:
+                                    import base64
+                                    status = base64.b64decode(output).decode()
+                                    if "done" in status.lower():
+                                        console.print("[green]✅ Cloud-init completed[/]")
+                                    elif "running" in status.lower():
+                                        console.print("[yellow]⚠️ Cloud-init still running[/]")
+                                    else:
+                                        console.print(f"[yellow]⚠️ Cloud-init status: {status.strip()}[/]")
+                except Exception:
+                    pass
+        except Exception:
+            console.print("[yellow]⚠️ Could not check cloud-init (QEMU agent may not be running)[/]")
+
+        console.print()
+
+    # Test 4: Check mounts (if running)
+    if not quick and state == "running":
+        console.print("[bold]4. Mount Points Check[/]")
+        all_paths = config.get("paths", {}).copy()
+        all_paths.update(config.get("app_data_paths", {}))
+
+        if all_paths:
+            for idx, (host_path, guest_path) in enumerate(all_paths.items()):
+                try:
+                    result = subprocess.run(
+                        ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
+                         f'{{"execute":"guest-exec","arguments":{{"path":"test","arg":["-d","{guest_path}"],"capture-output":true}}}}'],
+                        capture_output=True, text=True, timeout=10
+                    )
+                    if result.returncode == 0:
+                        try:
+                            response = json.loads(result.stdout)
+                            if "return" in response:
+                                pid = response["return"]["pid"]
+                                result2 = subprocess.run(
+                                    ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
+                                     f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
+                                    capture_output=True, text=True, timeout=10
+                                )
+                                if result2.returncode == 0:
+                                    resp2 = json.loads(result2.stdout)
+                                    if "return" in resp2 and resp2["return"]["exited"]:
+                                        exit_code = resp2["return"]["exitcode"]
+                                        if exit_code == 0:
+                                            console.print(f"[green]✅ {guest_path}[/]")
+                                        else:
+                                            console.print(f"[red]❌ {guest_path} (not accessible)[/]")
+                                        continue
+                        except Exception:
+                            pass
+                    console.print(f"[yellow]⚠️ {guest_path} (unknown)[/]")
+                except Exception:
+                    console.print(f"[yellow]⚠️ {guest_path} (could not check)[/]")
+        else:
+            console.print("[dim]No mount points configured[/]")
+
+        console.print()
+
+    # Test 5: Run health check (if running and not quick)
+    if not quick and state == "running":
+        console.print("[bold]5. Health Check[/]")
+        try:
+            result = subprocess.run(
+                ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
+                 '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}'],
+                capture_output=True, text=True, timeout=60
+            )
+            if result.returncode == 0:
+                console.print("[green]✅ Health check triggered[/]")
+                console.print("  View results in VM: cat /var/log/clonebox-health.log")
+            else:
+                console.print("[yellow]⚠️ Health check script not found[/]")
+                console.print("  VM may not have been created with health checks")
+        except Exception as e:
+            console.print(f"[yellow]⚠️ Could not run health check: {e}[/]")
+
+        console.print()
+
+    # Summary
+    console.print("[bold]Test Summary[/]")
+    console.print("VM configuration is valid and VM is accessible.")
+    console.print("\n[dim]For detailed health report, run in VM:[/]")
+    console.print("[dim]  cat /var/log/clonebox-health.log[/]")
+
+
 CLONEBOX_CONFIG_FILE = ".clonebox.yaml"
 CLONEBOX_ENV_FILE = ".env"
 
@@ -709,7 +1421,7 @@ def load_clonebox_config(path: Path) -> dict:
     return config
 
 
-def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout: int = 600):
+def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout: int = 900):
    """Monitor cloud-init status in VM and show progress."""
    import subprocess
    import time
@@ -763,15 +1475,17 @@ def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout:
                 time.sleep(2)
                 break
 
-            # Estimate remaining time
+            # Estimate remaining time (total ~12-15 minutes for a full desktop install)
             if elapsed < 60:
-                remaining = "~9-10 minutes"
-            elif elapsed < 180:
-                remaining = f"~{8 - minutes} minutes"
+                remaining = "~12-15 minutes"
             elif elapsed < 300:
-                remaining = f"~{6 - minutes} minutes"
+                remaining = f"~{12 - minutes} minutes"
+            elif elapsed < 600:
+                remaining = f"~{10 - minutes} minutes"
+            elif elapsed < 800:
+                remaining = "finishing soon..."
             else:
-                remaining = "finishing soon"
+                remaining = "almost done"
 
             if restart_detected:
                 progress.update(task, description=f"[cyan]Starting GUI... ({minutes}m {seconds}s, {remaining})")
@@ -1089,6 +1803,7 @@ def main():
         "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
     )
     start_parser.add_argument("--no-viewer", action="store_true", help="Don't open virt-viewer")
+    start_parser.add_argument("--viewer", action="store_true", help="Open virt-viewer GUI")
     start_parser.add_argument(
         "-u",
         "--user",
@@ -1097,6 +1812,19 @@ def main():
     )
     start_parser.set_defaults(func=cmd_start)
 
+    # Open command - open VM viewer
+    open_parser = subparsers.add_parser("open", help="Open VM viewer window")
+    open_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    open_parser.add_argument(
+        "-u",
+        "--user",
+        action="store_true",
+        help="Use user session (qemu:///session) - no root required",
+    )
+    open_parser.set_defaults(func=cmd_open)
+
     # Stop command
     stop_parser = subparsers.add_parser("stop", help="Stop a VM")
     stop_parser.add_argument("name", help="VM name")
@@ -1178,6 +1906,66 @@ def main():
     )
     clone_parser.set_defaults(func=cmd_clone)
 
+    # Status command - check VM health from workstation
+    status_parser = subparsers.add_parser("status", help="Check VM installation status and health")
+    status_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    status_parser.add_argument(
+        "-u",
+        "--user",
+        action="store_true",
+        help="Use user session (qemu:///session)",
+    )
+    status_parser.add_argument(
+        "--health", "-H", action="store_true", help="Run full health check"
+    )
+    status_parser.set_defaults(func=cmd_status)
+
+    # Export command - package VM for migration
+    export_parser = subparsers.add_parser("export", help="Export VM and data for migration")
+    export_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    export_parser.add_argument(
+        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
+    )
+    export_parser.add_argument(
+        "-o", "--output", help="Output archive filename (default: <vmname>-export.tar.gz)"
+    )
+    export_parser.add_argument(
+        "--include-data", "-d", action="store_true",
+        help="Include shared data (browser profiles, configs) in export"
+    )
+    export_parser.set_defaults(func=cmd_export)
+
+    # Import command - restore VM from export
+    import_parser = subparsers.add_parser("import", help="Import VM from export archive")
+    import_parser.add_argument("archive", help="Path to export archive (.tar.gz)")
+    import_parser.add_argument(
+        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
+    )
+    import_parser.add_argument(
+        "--replace", action="store_true", help="Replace an existing VM"
+    )
+    import_parser.set_defaults(func=cmd_import)
+
+    # Test command - validate VM configuration
+    test_parser = subparsers.add_parser("test", help="Test VM configuration and health")
+    test_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    test_parser.add_argument(
+        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
+    )
+    test_parser.add_argument(
+        "--quick", action="store_true", help="Quick test (no deep health checks)"
+    )
+    test_parser.add_argument(
+        "--verbose", "-v", action="store_true", help="Verbose output"
+    )
+    test_parser.set_defaults(func=cmd_test)
+
     args = parser.parse_args()
 
     if hasattr(args, "func"):
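The new `status` and `test` commands above all rely on the same QEMU guest-agent pattern: `guest-exec` launches a process in the guest and returns a PID, then `guest-exec-status` is polled for the exit code and base64-encoded output. A minimal standalone sketch of that round trip, assuming `qemu-guest-agent` is running inside the VM (the `guest_run` helper name is illustrative, not part of the clonebox API):

```python
# Minimal sketch of the guest-exec round trip used by `clonebox status` and
# `clonebox test`. Assumes qemu-guest-agent is running inside the VM.
import base64
import json
import subprocess
import time

def guest_run(conn_uri: str, vm_name: str, path: str, args: list[str]) -> tuple[int, str]:
    # Start the process in the guest; the agent replies with a PID.
    cmd = json.dumps({
        "execute": "guest-exec",
        "arguments": {"path": path, "arg": args, "capture-output": True},
    })
    out = subprocess.run(
        ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name, cmd],
        capture_output=True, text=True, timeout=15, check=True,
    )
    pid = json.loads(out.stdout)["return"]["pid"]

    # Poll guest-exec-status until the process has exited.
    status_cmd = json.dumps({"execute": "guest-exec-status", "arguments": {"pid": pid}})
    while True:
        out = subprocess.run(
            ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name, status_cmd],
            capture_output=True, text=True, timeout=15, check=True,
        )
        ret = json.loads(out.stdout)["return"]
        if ret.get("exited"):
            stdout = base64.b64decode(ret.get("out-data", "")).decode()
            return ret.get("exitcode", -1), stdout
        time.sleep(0.5)

# Example: read the health summary the same way cmd_status does.
# code, text = guest_run("qemu:///session", "clone-clonebox",
#                        "/bin/cat", ["/var/log/clonebox-health-status"])
```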
clonebox/cloner.py CHANGED
@@ -417,9 +417,10 @@ class SelectiveVMCloner:
         ET.SubElement(cdrom, "readonly")
 
         # 9p filesystem mounts (bind mounts from host)
+        # Use accessmode="mapped" to allow VM user to access host files regardless of UID
         for idx, (host_path, guest_tag) in enumerate(config.paths.items()):
             if Path(host_path).exists():
-                fs = ET.SubElement(devices, "filesystem", type="mount", accessmode="passthrough")
+                fs = ET.SubElement(devices, "filesystem", type="mount", accessmode="mapped")
                 ET.SubElement(fs, "driver", type="path", wrpolicy="immediate")
                 ET.SubElement(fs, "source", dir=host_path)
                 # Use simple tag names for 9p mounts
@@ -659,15 +660,18 @@ fi
         meta_data = f"instance-id: {config.name}\nlocal-hostname: {config.name}\n"
         (cloudinit_dir / "meta-data").write_text(meta_data)
 
-        # Generate mount commands for 9p filesystems
+        # Generate mount commands and fstab entries for 9p filesystems
         mount_commands = []
+        fstab_entries = []
         for idx, (host_path, guest_path) in enumerate(config.paths.items()):
             if Path(host_path).exists():
                 tag = f"mount{idx}"
                 mount_commands.append(f"  - mkdir -p {guest_path}")
                 mount_commands.append(
-                    f"  - mount -t 9p -o trans=virtio,version=9p2000.L {tag} {guest_path}"
+                    f"  - mount -t 9p -o trans=virtio,version=9p2000.L {tag} {guest_path} || true"
                 )
+                # Add fstab entry for persistence after reboot
+                fstab_entries.append(f"{tag} {guest_path} 9p trans=virtio,version=9p2000.L,nofail 0 0")
 
         # User-data
         # Add desktop environment if GUI is enabled
@@ -690,7 +694,13 @@ fi
         for svc in config.services:
             runcmd_lines.append(f"  - systemctl enable --now {svc} || true")
 
-        # Add mounts
+        # Add fstab entries for persistent mounts after reboot
+        if fstab_entries:
+            runcmd_lines.append("  - echo '# CloneBox 9p mounts' >> /etc/fstab")
+            for entry in fstab_entries:
+                runcmd_lines.append(f"  - echo '{entry}' >> /etc/fstab")
+
+        # Add mounts (immediate, before reboot)
         for cmd in mount_commands:
             runcmd_lines.append(cmd)
@@ -722,7 +732,8 @@ fi
 
         # Add reboot command at the end if GUI is enabled
         if config.gui:
-            runcmd_lines.append("  - shutdown -r +1 'Rebooting to start GUI' || reboot")
+            runcmd_lines.append("  - echo 'Rebooting in 10 seconds to start GUI...'")
+            runcmd_lines.append("  - sleep 10 && reboot")
 
         runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
clonebox-{0.1.8 → 0.1.10}.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: clonebox
-Version: 0.1.8
+Version: 0.1.10
 Summary: Clone your workstation environment to an isolated VM with selective apps, paths and services
 Author: CloneBox Team
 License: Apache-2.0
@@ -64,6 +64,41 @@ CloneBox lets you create isolated virtual machines with only the applications, d
 - 🖥️ **GUI support** - SPICE graphics with virt-viewer integration
 - ⚡ **Fast creation** - No full disk cloning, VMs are ready in seconds
 - 📥 **Auto-download** - Automatically downloads and caches Ubuntu cloud images (stored in ~/Downloads)
+- 📊 **Health monitoring** - Built-in health checks for packages, services, and mounts
+- 🔄 **VM migration** - Export/import VMs with data between workstations
+- 🧪 **Configuration testing** - Validate VM settings and functionality
+- 📁 **App data sync** - Include browser profiles, IDE settings, and app configs
+
+CloneBox is a CLI tool for **quickly cloning your current workstation environment into an isolated virtual machine (VM)**.
+Instead of copying the whole disk, it uses **bind mounts** (live directory sharing) and **cloud-init** to selectively carry over only what is needed: running services (Docker, PostgreSQL, nginx), applications, project paths, and configuration. It automatically downloads Ubuntu images, installs packages, and boots the VM with a SPICE GUI. Ideal for developers on Linux - the VM is ready in minutes, without duplicating data.
+
+Key commands:
+- `clonebox` - interactive wizard (detect + create + start)
+- `clonebox detect` - scans services/apps/paths
+- `clonebox clone . --user --run` - quick clone of the current directory with a user session and autostart
+
+### Why do virtual workstation clones make sense?
+
+**Problem**: Developers don't isolate dev/test environments (e.g. for AI agents) because manually recreating a setup is painful - hours spent installing apps, services, configs, and dotfiles. Moving from a physical PC to a VM would require a full rebuild, which blocks the workflow.
+
+**The CloneBox approach**: It automatically **scans and clones the "here and now" state** (services from `ps`, containers from `docker ps`, projects from git/.env). The VM inherits the environment without copying all the clutter - only the selected bind mounts.
+
+**Benefits in context (embedded/distributed systems, AI automation)**:
+- **A sandbox for experiments**: Test AI agents, edge computing (RPi/ESP32 simulations), or Camel/ERP integrations in isolation, without breaking the host.
+- **Workstation reproduction**: Get your home setup (Python/Rust/Go envs, Docker Compose, a Postgres dev DB) on the company PC - clone it and work identically.
+- **Speed > dotfiles**: Dotfiles recreate configs, but they don't capture runtime state (running servers, open projects). CloneBox is a "snapshot on steroids".
+- **Security/cost optimization**: Isolation from host files (mounts only), zero downtime, cheap on resources (libvirt/QEMU). For SMEs: fast dev-environment onboarding without a physical migration.
+- **AI-friendly**: LLM agents can run inside the VM with full context, with no risk of cluttering the main PC.
+
+Example: You have Kubernetes/Podman running with your home lab plus an automotive leasing project. `clonebox clone ~/projects --run` → a VM ready in 30 s, with the same services, but isolated. Better than Docker (no GUI/full OS) or a full migration.
+
+**Why don't people do this already?** Lack of automation - nobody wants to rebuild by hand. CloneBox solves it with a single command. A great match for distributed infra, AI tools, and business automation.
+
 
 ## Installation
 
@@ -142,7 +177,6 @@ Simply run `clonebox` to start the interactive wizard:
 ```bash
 clonebox
 clonebox clone . --user --run --replace --base-image ~/ubuntu-22.04-cloud.qcow2
-
 ```
 
 
@@ -181,6 +215,130 @@ clonebox detect --json
 
 ## Usage Examples
 
+### Basic Workflow
+
+```bash
+# 1. Clone current directory with auto-detection
+clonebox clone . --user
+
+# 2. Review generated config
+cat .clonebox.yaml
+
+# 3. Create and start VM
+clonebox start . --user --viewer
+
+# 4. Check VM status
+clonebox status . --user
+
+# 5. Open VM window later
+clonebox open . --user
+
+# 6. Stop VM when done
+clonebox stop . --user
+```
+
+### Development Environment with Browser Profiles
+
+```bash
+# Clone with app data (browser profiles, IDE settings)
+clonebox clone . --user --run
+
+# VM will have:
+# - All your project directories
+# - Browser profiles (Chrome, Firefox) with bookmarks and passwords
+# - IDE settings (PyCharm, VSCode)
+# - Docker containers and services
+
+# Access in VM:
+ls ~/.config/google-chrome   # Chrome profile
+ls ~/.mozilla/firefox        # Firefox profile
+ls ~/.config/JetBrains       # PyCharm settings
+```
+
+### Testing VM Configuration
+
+```bash
+# Quick test - basic checks
+clonebox test . --user --quick
+
+# Full test with verbose output
+clonebox test . --user --verbose
+
+# Test output shows:
+# ✅ VM is defined in libvirt
+# ✅ VM is running
+# ✅ VM has network access (IP: 192.168.122.89)
+# ✅ Cloud-init completed
+# ✅ All mount points accessible
+# ✅ Health check triggered
+```
+
+### VM Health Monitoring
+
+```bash
+# Check overall status
+clonebox status . --user
+
+# Output:
+# 📊 Checking VM status: clone-clonebox
+# ✅ VM State: running
+# ✅ VM has network access
+# ☁️ Cloud-init: Still running (packages installing)
+# 🏥 Health Check Status... ⏳ Health check not yet run
+
+# Trigger health check
+clonebox status . --user --health
+
+# View detailed health report in VM:
+# cat /var/log/clonebox-health.log
+```
+
+### Export/Import Workflow
+
+```bash
+# On workstation A - export VM with all data
+clonebox export . --user --include-data -o my-dev-env.tar.gz
+
+# Transfer the file to workstation B, then import
+clonebox import my-dev-env.tar.gz --user
+
+# Start VM on the new workstation
+clonebox start . --user
+clonebox open . --user
+
+# VM includes:
+# - Complete disk image
+# - All browser profiles and settings
+# - Project files
+# - Docker images and containers
+```
+
+### Troubleshooting Common Issues
+
+```bash
+# If mounts are empty after reboot:
+clonebox status . --user   # Check VM status
+# Then in VM:
+sudo mount -a              # Remount all fstab entries
+
+# If browser profiles don't sync:
+rm .clonebox.yaml
+clonebox clone . --user --run --replace
+
+# If GUI doesn't open:
+clonebox open . --user     # Easiest way
+# or:
+virt-viewer --connect qemu:///session clone-clonebox
+
+# Check VM details:
+clonebox list              # List all VMs
+virsh --connect qemu:///session dominfo clone-clonebox
+```
+
+## Legacy Examples (Manual Config)
+
+These examples use the older `create` command with manual JSON config. For most users, the `clone` command with auto-detection is easier.
+
 ### Python Development Environment
 
 ```bash
@@ -222,17 +380,36 @@ clonebox create --name fullstack --config '{
 
 ## Inside the VM
 
-After the VM boots, mount shared directories:
+After the VM boots, shared directories are mounted automatically via fstab entries. You can check their status:
 
 ```bash
-# Mount shared paths (9p filesystem)
-sudo mkdir -p /mnt/projects
-sudo mount -t 9p -o trans=virtio,version=9p2000.L mount0 /mnt/projects
+# Check mount status
+mount | grep 9p
 
-# Or add to /etc/fstab for permanent mount
-echo "mount0 /mnt/projects 9p trans=virtio,version=9p2000.L 0 0" | sudo tee -a /etc/fstab
+# View health check report
+cat /var/log/clonebox-health.log
+
+# Re-run health check manually
+clonebox-health
+
+# Check cloud-init status
+sudo cloud-init status
+
+# Manual mount (if needed)
+sudo mkdir -p /mnt/projects
+sudo mount -t 9p -o trans=virtio,version=9p2000.L,nofail mount0 /mnt/projects
 ```
 
+### Health Check System
+
+CloneBox includes automated health checks that verify:
+- Package installation (apt/snap)
+- Service status
+- Mount point accessibility
+- GUI readiness
+
+Health check logs are saved to `/var/log/clonebox-health.log`, with a summary in `/var/log/clonebox-health-status`.
+
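The `HEALTH_STATUS` marker in the summary file is machine-readable, so scripts inside the VM can gate on it. A minimal sketch, assuming the health check has already run:

```python
# Minimal sketch (run inside the VM): gate automation on the CloneBox health
# summary. Assumes the health check has written the status file shown above.
from pathlib import Path

status_file = Path("/var/log/clonebox-health-status")
log_file = Path("/var/log/clonebox-health.log")

status = status_file.read_text() if status_file.exists() else ""
if "HEALTH_STATUS=OK" in status:
    print("✅ all health checks passed")
else:
    print("❌ health check failed or not yet run; last log lines:")
    if log_file.exists():
        # Print the tail of the full report for quick triage.
        print("\n".join(log_file.read_text().splitlines()[-20:]))
```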
 ## Architecture
 
 ```
@@ -399,6 +576,7 @@ clonebox clone . --network auto
 | `clonebox clone . --network user` | Use user-mode networking (slirp) |
 | `clonebox clone . --network auto` | Auto-detect network mode (default) |
 | `clonebox start .` | Start VM from `.clonebox.yaml` in current dir |
+| `clonebox start . --viewer` | Start VM and open GUI window |
 | `clonebox start <name>` | Start existing VM by name |
 | `clonebox stop <name>` | Stop a VM (graceful shutdown) |
 | `clonebox stop -f <name>` | Force stop a VM |
@@ -408,6 +586,14 @@ clonebox clone . --network auto
 | `clonebox detect --yaml` | Output as YAML config |
 | `clonebox detect --yaml --dedupe` | YAML with duplicates removed |
 | `clonebox detect --json` | Output as JSON |
+| `clonebox status . --user` | Check VM health, cloud-init status, and IP address |
+| `clonebox test . --user` | Test VM configuration and validate all settings |
+| `clonebox export . --user` | Export VM for migration to another workstation |
+| `clonebox export . --user --include-data` | Export VM with browser profiles and configs |
+| `clonebox import archive.tar.gz --user` | Import VM from export archive |
+| `clonebox open . --user` | Open GUI viewer for VM (same as virt-viewer) |
+| `virt-viewer --connect qemu:///session <vm>` | Open GUI for running VM |
+| `virsh --connect qemu:///session console <vm>` | Open text console (Ctrl+] to exit) |
 
 ## Requirements
 
@@ -479,6 +665,102 @@ sudo apt install virt-viewer
 virt-viewer --connect qemu:///session <vm-name>
 ```
 
+### Browser Profiles Not Syncing
+
+If browser profiles or app data aren't available:
+
+1. **Regenerate the config with app data:**
+   ```bash
+   rm .clonebox.yaml
+   clonebox clone . --user --run --replace
+   ```
+
+2. **Check mount permissions in the VM:**
+   ```bash
+   # Verify mounts are accessible
+   ls -la ~/.config/google-chrome
+   ls -la ~/.mozilla/firefox
+   ```
+
+### Mount Points Empty After Reboot
+
+If shared directories appear empty after a VM restart:
+
+1. **Check fstab entries:**
+   ```bash
+   cat /etc/fstab | grep 9p
+   ```
+
+2. **Mount manually:**
+   ```bash
+   sudo mount -a
+   ```
+
+3. **Verify the access mode:**
+   - VMs created with `accessmode="mapped"` allow any user to access mounts
+   - Older VMs used `accessmode="passthrough"`, which preserves host UIDs
+
+## Advanced Usage
+
+### VM Migration Between Workstations
+
+Export your complete VM environment:
+
+```bash
+# Export VM with all data
+clonebox export . --user --include-data -o my-dev-env.tar.gz
+
+# Transfer to the new workstation, then import
+clonebox import my-dev-env.tar.gz --user
+clonebox start . --user
+```
+
+### Testing VM Configuration
+
+Validate your VM setup:
+
+```bash
+# Quick test (basic checks)
+clonebox test . --user --quick
+
+# Full test (includes health checks)
+clonebox test . --user --verbose
+```
+
+### Monitoring VM Health
+
+Check VM status from the workstation:
+
+```bash
+# Check VM state, IP, cloud-init, and health
+clonebox status . --user
+
+# Trigger a health check in the VM
+clonebox status . --user --health
+```
+
+### Reopening the VM Window
+
+If you close the VM window, you can reopen it:
+
+```bash
+# Open GUI viewer (easiest)
+clonebox open . --user
+
+# Start VM and open GUI (if VM is stopped)
+clonebox start . --user --viewer
+
+# Open GUI for a running VM
+virt-viewer --connect qemu:///session clone-clonebox
+
+# List VMs to get the correct name
+clonebox list
+
+# Text console (no GUI)
+virsh --connect qemu:///session console clone-clonebox
+# Press Ctrl+] to exit the console
+```
+
 ## License
 
 MIT License - see [LICENSE](LICENSE) file.
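For reference during migration: `cmd_export` lays the archive out with a single top-level directory named after the VM, containing `vm.xml`, `disk.qcow2`, `cloud-init.iso`, optionally `.clonebox.yaml` and `.env`, plus a `data/` tree with `paths.json` when `--include-data` is used. A hedged sketch for inspecting an archive before importing it (the archive name is an example):

```python
# Sketch: sanity-check a CloneBox export archive before importing. The layout
# mirrors what cmd_export writes; members are <vm-name>/... entries.
import tarfile

expected = {"vm.xml", "disk.qcow2", "cloud-init.iso", ".clonebox.yaml", ".env"}
with tarfile.open("my-dev-env.tar.gz", "r:gz") as tar:
    names = tar.getnames()
    vm_name = names[0].split("/", 1)[0] if names else "?"
    # Top-level files sit exactly one path component below the VM directory.
    present = {n.split("/", 1)[1] for n in names if n.count("/") == 1}
    print(f"VM: {vm_name}")
    for member in sorted(expected):
        marker = "✅" if member in present else "❌"
        print(f"  {marker} {member}")
    if any(n.split("/", 1)[-1].startswith("data/") for n in names):
        print("  ✅ data/ (exported with --include-data)")
```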
clonebox-0.1.10.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+clonebox/__init__.py,sha256=IOk7G0DiSQ33EGbFC0xbnnFB9aou_6yuyFxvycQEvA0,407
+clonebox/__main__.py,sha256=Fcoyzwwyz5-eC_sBlQk5a5RbKx8uodQz5sKJ190U0NU,135
+clonebox/cli.py,sha256=NFScoojeI1XJ982SuNt01iW52hHIYSttGy2UzZuJCCQ,76413
+clonebox/cloner.py,sha256=0puM04SzifccPfIVqc2CXFFcdNLWKpbiXXbBplrm9s8,31850
+clonebox/detector.py,sha256=4fu04Ty6KC82WkcJZ5UL5TqXpWYE7Kb7R0uJ-9dtbCk,21635
+clonebox-0.1.10.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+clonebox-0.1.10.dist-info/METADATA,sha256=hzdP5bntKA1s_RyRLx0hgbBzGrlt3NBGvDKu5YZfabE,24526
+clonebox-0.1.10.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+clonebox-0.1.10.dist-info/entry_points.txt,sha256=FES95Vi3btfViLEEoHdb8nikNxTqzaooi9ehZw9ZfWI,47
+clonebox-0.1.10.dist-info/top_level.txt,sha256=LdMo2cvCrEcRGH2M8JgQNVsCoszLV0xug6kx1JnaRjo,9
+clonebox-0.1.10.dist-info/RECORD,,
clonebox-0.1.8.dist-info/RECORD DELETED
@@ -1,11 +0,0 @@
-clonebox/__init__.py,sha256=IOk7G0DiSQ33EGbFC0xbnnFB9aou_6yuyFxvycQEvA0,407
-clonebox/__main__.py,sha256=Fcoyzwwyz5-eC_sBlQk5a5RbKx8uodQz5sKJ190U0NU,135
-clonebox/cli.py,sha256=Kp-1C9Be39ZK9fVe-4bQLyb25W2otXk8597MvOOee7M,43133
-clonebox/cloner.py,sha256=CMy0NWxOiUMULaQuDHY0_LDUaELW0_h4ewj_dZ_5WHw,31171
-clonebox/detector.py,sha256=4fu04Ty6KC82WkcJZ5UL5TqXpWYE7Kb7R0uJ-9dtbCk,21635
-clonebox-0.1.8.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-clonebox-0.1.8.dist-info/METADATA,sha256=6a8ry0WjVyY21D3r1teMvSEL7zwVaoweH9GRtFxnZJY,15582
-clonebox-0.1.8.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-clonebox-0.1.8.dist-info/entry_points.txt,sha256=FES95Vi3btfViLEEoHdb8nikNxTqzaooi9ehZw9ZfWI,47
-clonebox-0.1.8.dist-info/top_level.txt,sha256=LdMo2cvCrEcRGH2M8JgQNVsCoszLV0xug6kx1JnaRjo,9
-clonebox-0.1.8.dist-info/RECORD,,