clonebox-0.1.12-py3-none-any.whl → clonebox-0.1.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clonebox/__init__.py CHANGED
@@ -5,7 +5,7 @@ Selectively clone applications, paths and services to a new virtual machine
  with bind mounts instead of full disk cloning.
  """
 
- __version__ = "0.1.12"
+ __version__ = "0.1.13"
  __author__ = "CloneBox Team"
 
  from clonebox.cloner import SelectiveVMCloner
clonebox/cli.py CHANGED
@@ -515,21 +515,47 @@ def cmd_open(args):
 
  def cmd_stop(args):
      """Stop a VM."""
+     name = args.name
+ 
+     # If name is a path, load config
+     if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+         target_path = Path(name).expanduser().resolve()
+         config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+         if config_file.exists():
+             config = load_clonebox_config(config_file)
+             name = config["vm"]["name"]
+         else:
+             console.print(f"[red]❌ Config not found: {config_file}[/]")
+             return
+ 
      cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
-     cloner.stop_vm(args.name, force=args.force, console=console)
+     cloner.stop_vm(name, force=args.force, console=console)
 
 
  def cmd_delete(args):
      """Delete a VM."""
+     name = args.name
+ 
+     # If name is a path, load config
+     if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+         target_path = Path(name).expanduser().resolve()
+         config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+         if config_file.exists():
+             config = load_clonebox_config(config_file)
+             name = config["vm"]["name"]
+         else:
+             console.print(f"[red]❌ Config not found: {config_file}[/]")
+             return
+ 
      if not args.yes:
          if not questionary.confirm(
-             f"Delete VM '{args.name}' and its storage?", default=False, style=custom_style
+             f"Delete VM '{name}' and its storage?", default=False, style=custom_style
          ).ask():
              console.print("[yellow]Cancelled.[/]")
              return
 
      cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
-     cloner.delete_vm(args.name, delete_storage=not args.keep_storage, console=console)
+     cloner.delete_vm(name, delete_storage=not args.keep_storage, console=console)
 
 
  def cmd_list(args):
@@ -646,6 +672,109 @@ def cmd_status(args):
      except Exception:
          console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU agent may not be ready)[/]")
 
+     # Check mount status
+     console.print("\n[bold]💾 Checking mount status...[/]")
+     try:
+         # Load config to get expected mounts
+         config_file = Path.cwd() / ".clonebox.yaml"
+         if config_file.exists():
+             config = load_clonebox_config(config_file)
+             all_paths = config.get("paths", {}).copy()
+             all_paths.update(config.get("app_data_paths", {}))
+ 
+             if all_paths:
+                 # Check which mounts are active
+                 result = subprocess.run(
+                     ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                      '{"execute":"guest-exec","arguments":{"path":"/bin/sh","arg":["-c","mount | grep 9p"],"capture-output":true}}'],
+                     capture_output=True, text=True, timeout=10
+                 )
+ 
+                 mount_table = Table(title="Mount Points", border_style="cyan", show_header=True)
+                 mount_table.add_column("Guest Path", style="bold")
+                 mount_table.add_column("Status", justify="center")
+                 mount_table.add_column("Files", justify="right")
+ 
+                 mounted_paths = []
+                 if result.returncode == 0 and "return" in result.stdout:
+                     # Parse guest-exec response for mount output
+                     import json
+                     try:
+                         resp = json.loads(result.stdout)
+                         if "return" in resp and "pid" in resp["return"]:
+                             # Get the output from guest-exec-status
+                             pid = resp["return"]["pid"]
+                             status_result = subprocess.run(
+                                 ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                                  f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
+                                 capture_output=True, text=True, timeout=5
+                             )
+                             if status_result.returncode == 0:
+                                 status_resp = json.loads(status_result.stdout)
+                                 if "return" in status_resp and "out-data" in status_resp["return"]:
+                                     import base64
+                                     mount_output = base64.b64decode(status_resp["return"]["out-data"]).decode()
+                                     mounted_paths = [line.split()[2] for line in mount_output.split('\n') if line.strip()]
+                     except:
+                         pass
+ 
+                 # Check each expected mount
+                 working_mounts = 0
+                 total_mounts = 0
+                 for host_path, guest_path in all_paths.items():
+                     total_mounts += 1
+                     is_mounted = any(guest_path in mp for mp in mounted_paths)
+ 
+                     # Try to get file count
+                     file_count = "?"
+                     if is_mounted:
+                         try:
+                             count_result = subprocess.run(
+                                 ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                                  f'{{"execute":"guest-exec","arguments":{{"path":"/bin/sh","arg":["-c","ls -A {guest_path} 2>/dev/null | wc -l"],"capture-output":true}}}}'],
+                                 capture_output=True, text=True, timeout=5
+                             )
+                             if count_result.returncode == 0:
+                                 resp = json.loads(count_result.stdout)
+                                 if "return" in resp and "pid" in resp["return"]:
+                                     pid = resp["return"]["pid"]
+                                     import time
+                                     time.sleep(0.5)
+                                     status_result = subprocess.run(
+                                         ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                                          f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
+                                         capture_output=True, text=True, timeout=5
+                                     )
+                                     if status_result.returncode == 0:
+                                         status_resp = json.loads(status_result.stdout)
+                                         if "return" in status_resp and "out-data" in status_resp["return"]:
+                                             file_count = base64.b64decode(status_resp["return"]["out-data"]).decode().strip()
+                         except:
+                             pass
+ 
+                     if is_mounted:
+                         status = "[green]✅ Mounted[/]"
+                         working_mounts += 1
+                     else:
+                         status = "[red]❌ Not mounted[/]"
+ 
+                     mount_table.add_row(guest_path, status, str(file_count))
+ 
+                 console.print(mount_table)
+                 console.print(f"[dim]{working_mounts}/{total_mounts} mounts active[/]")
+ 
+                 if working_mounts < total_mounts:
+                     console.print("[yellow]⚠️ Some mounts are missing. Try remounting in VM:[/]")
+                     console.print("[dim] sudo mount -a[/]")
+                     console.print("[dim]Or rebuild VM with: clonebox clone . --user --run --replace[/]")
+             else:
+                 console.print("[dim]No mount points configured[/]")
+         else:
+             console.print("[dim]No .clonebox.yaml found - cannot check mounts[/]")
+     except Exception as e:
+         console.print(f"[yellow]⚠️ Cannot check mounts: {e}[/]")
+         console.print("[dim]QEMU guest agent may not be ready yet[/]")
+ 
      # Check health status if available
      console.print("\n[bold]🏥 Health Check Status...[/]")
      try:
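Note: the mount check added above parses the guest's `mount | grep 9p` output and takes the third whitespace-separated field (`line.split()[2]`) as the mount point, following the kernel's `tag on /path type 9p (options)` line format. Roughly what the guest returns (paths are illustrative):

```bash
mount | grep 9p
# mount0 on /home/ubuntu/Downloads type 9p (rw,relatime,...)
# mount1 on /home/ubuntu/Documents type 9p (rw,relatime,...)
#           ^ third field = the mount point cmd_status compares against
```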
@@ -710,28 +839,49 @@ def cmd_export(args):
          else:
              console.print(f"[red]❌ Config not found: {config_file}[/]")
              return
-
-     if not name:
+     elif not name or name == ".":
          config_file = Path.cwd() / ".clonebox.yaml"
          if config_file.exists():
              config = load_clonebox_config(config_file)
              name = config["vm"]["name"]
          else:
-             console.print("[red]❌ No VM name specified[/]")
+             console.print("[red]❌ No .clonebox.yaml found in current directory[/]")
+             console.print("[dim]Usage: clonebox export . or clonebox export <vm-name>[/]")
              return
 
      console.print(f"[bold cyan]📦 Exporting VM: {name}[/]\n")
 
-     # Determine storage path
-     if user_session:
-         storage_base = Path.home() / ".local/share/libvirt/images"
-     else:
-         storage_base = Path("/var/lib/libvirt/images")
-
-     vm_dir = storage_base / name
-
-     if not vm_dir.exists():
-         console.print(f"[red]❌ VM storage not found: {vm_dir}[/]")
+     # Get actual disk location from virsh
+     try:
+         result = subprocess.run(
+             ["virsh", "--connect", conn_uri, "domblklist", name, "--details"],
+             capture_output=True, text=True, timeout=10
+         )
+         if result.returncode != 0:
+             console.print(f"[red]❌ VM '{name}' not found[/]")
+             return
+ 
+         # Parse disk paths from output
+         disk_path = None
+         cloudinit_path = None
+         for line in result.stdout.split('\n'):
+             if 'disk' in line and '.qcow2' in line:
+                 parts = line.split()
+                 if len(parts) >= 4:
+                     disk_path = Path(parts[3])
+             elif 'cdrom' in line or '.iso' in line:
+                 parts = line.split()
+                 if len(parts) >= 4:
+                     cloudinit_path = Path(parts[3])
+ 
+         if not disk_path or not disk_path.exists():
+             console.print(f"[red]❌ VM disk not found[/]")
+             return
+ 
+         console.print(f"[dim]Disk location: {disk_path}[/]")
+ 
+     except Exception as e:
+         console.print(f"[red]❌ Error getting VM disk: {e}[/]")
          return
 
      # Create export directory
@@ -771,14 +921,16 @@
 
      # Copy disk image
      console.print("[cyan]Copying disk image (this may take a while)...[/]")
-     disk_image = vm_dir / f"{name}.qcow2"
-     if disk_image.exists():
-         shutil.copy2(disk_image, temp_dir / "disk.qcow2")
+     if disk_path and disk_path.exists():
+         shutil.copy2(disk_path, temp_dir / "disk.qcow2")
+         console.print(f"[green]✅ Disk copied: {disk_path.stat().st_size / (1024**3):.2f} GB[/]")
+     else:
+         console.print("[yellow]⚠️ Disk image not found[/]")
 
      # Copy cloud-init ISO
-     cloudinit_iso = vm_dir / "cloud-init.iso"
-     if cloudinit_iso.exists():
-         shutil.copy2(cloudinit_iso, temp_dir / "cloud-init.iso")
+     if cloudinit_path and cloudinit_path.exists():
+         shutil.copy2(cloudinit_path, temp_dir / "cloud-init.iso")
+         console.print("[green]✅ Cloud-init ISO copied[/]")
 
      # Copy config file
      config_file = Path.cwd() / ".clonebox.yaml"
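For context, `cmd_export` now resolves disk locations with `virsh domblklist <name> --details`, whose output table has four columns (Type, Device, Target, Source); the parser above takes the fourth field as the source path. A sketch of that lookup (VM name and paths are illustrative):

```bash
virsh --connect qemu:///session domblklist clone-clonebox --details
#  Type   Device   Target   Source
# ---------------------------------------------------------------------------
#  file   disk     vda      ~/.local/share/libvirt/images/clone-clonebox/clone-clonebox.qcow2
#  file   cdrom    sda      ~/.local/share/libvirt/images/clone-clonebox/cloud-init.iso
```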
@@ -993,11 +1145,13 @@ def cmd_test(args):
      """Test VM configuration and health."""
      import subprocess
      import json
+     from clonebox.validator import VMValidator
 
      name = args.name
      user_session = getattr(args, "user", False)
      quick = getattr(args, "quick", False)
      verbose = getattr(args, "verbose", False)
+     validate_all = getattr(args, "validate", False)
      conn_uri = "qemu:///session" if user_session else "qemu:///system"
 
      # If name is a path, load config
@@ -1827,7 +1981,7 @@ def main():
 
      # Stop command
      stop_parser = subparsers.add_parser("stop", help="Stop a VM")
-     stop_parser.add_argument("name", help="VM name")
+     stop_parser.add_argument("name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml")
      stop_parser.add_argument("--force", "-f", action="store_true", help="Force stop")
      stop_parser.add_argument(
          "-u",
@@ -1839,7 +1993,7 @@
 
      # Delete command
      delete_parser = subparsers.add_parser("delete", help="Delete a VM")
-     delete_parser.add_argument("name", help="VM name")
+     delete_parser.add_argument("name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml")
      delete_parser.add_argument("--yes", "-y", action="store_true", help="Skip confirmation")
      delete_parser.add_argument("--keep-storage", action="store_true", help="Keep disk images")
      delete_parser.add_argument(
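With the positional `name` now optional, `cmd_stop` and `cmd_delete` accept either a VM name or a path whose `.clonebox.yaml` supplies `vm.name`. A usage sketch:

```bash
# Resolve the VM name from ./.clonebox.yaml (vm.name key)
clonebox stop . --user
clonebox delete . --user --yes

# Passing an explicit VM name still works
clonebox stop clone-clonebox --user --force
```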
clonebox/cloner.py CHANGED
@@ -666,12 +666,16 @@ fi
          for idx, (host_path, guest_path) in enumerate(config.paths.items()):
              if Path(host_path).exists():
                  tag = f"mount{idx}"
+                 # Use uid=1000,gid=1000 to give ubuntu user access to mounts
+                 # mmap allows proper file mapping
+                 mount_opts = "trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000"
                  mount_commands.append(f" - mkdir -p {guest_path}")
+                 mount_commands.append(f" - chown 1000:1000 {guest_path}")
                  mount_commands.append(
-                     f" - mount -t 9p -o trans=virtio,version=9p2000.L {tag} {guest_path} || true"
+                     f" - mount -t 9p -o {mount_opts} {tag} {guest_path} || true"
                  )
                  # Add fstab entry for persistence after reboot
-                 fstab_entries.append(f"{tag} {guest_path} 9p trans=virtio,version=9p2000.L,nofail 0 0")
+                 fstab_entries.append(f"{tag} {guest_path} 9p {mount_opts},nofail 0 0")
 
          # User-data
          # Add desktop environment if GUI is enabled
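For a share tagged `mount0` mapped to a hypothetical guest path `/home/ubuntu/Downloads`, the format strings above render into these cloud-init commands and fstab entry:

```bash
mkdir -p /home/ubuntu/Downloads
chown 1000:1000 /home/ubuntu/Downloads
mount -t 9p -o trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000 mount0 /home/ubuntu/Downloads || true

# /etc/fstab entry appended for persistence across reboots:
# mount0 /home/ubuntu/Downloads 9p trans=virtio,version=9p2000.L,mmap,uid=1000,gid=1000,nofail 0 0
```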
clonebox/validator.py ADDED
@@ -0,0 +1,374 @@
+ """
+ VM validation module - validates VM state against YAML configuration.
+ """
+ import subprocess
+ import json
+ import base64
+ import time
+ from typing import Dict, List, Tuple, Optional
+ from pathlib import Path
+ from rich.console import Console
+ from rich.table import Table
+ 
+ 
+ class VMValidator:
+     """Validates VM configuration against expected state from YAML."""
+ 
+     def __init__(self, config: dict, vm_name: str, conn_uri: str, console: Console = None):
+         self.config = config
+         self.vm_name = vm_name
+         self.conn_uri = conn_uri
+         self.console = console or Console()
+         self.results = {
+             "mounts": {"passed": 0, "failed": 0, "total": 0, "details": []},
+             "packages": {"passed": 0, "failed": 0, "total": 0, "details": []},
+             "snap_packages": {"passed": 0, "failed": 0, "total": 0, "details": []},
+             "services": {"passed": 0, "failed": 0, "total": 0, "details": []},
+             "overall": "unknown"
+         }
+ 
+     def _exec_in_vm(self, command: str, timeout: int = 10) -> Optional[str]:
+         """Execute command in VM using QEMU guest agent."""
+         try:
+             # Execute command
+             result = subprocess.run(
+                 ["virsh", "--connect", self.conn_uri, "qemu-agent-command", self.vm_name,
+                  f'{{"execute":"guest-exec","arguments":{{"path":"/bin/sh","arg":["-c","{command}"],"capture-output":true}}}}'],
+                 capture_output=True, text=True, timeout=timeout
+             )
+ 
+             if result.returncode != 0:
+                 return None
+ 
+             response = json.loads(result.stdout)
+             if "return" not in response or "pid" not in response["return"]:
+                 return None
+ 
+             pid = response["return"]["pid"]
+ 
+             # Wait a bit for command to complete
+             time.sleep(0.3)
+ 
+             # Get result
+             status_result = subprocess.run(
+                 ["virsh", "--connect", self.conn_uri, "qemu-agent-command", self.vm_name,
+                  f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
+                 capture_output=True, text=True, timeout=5
+             )
+ 
+             if status_result.returncode != 0:
+                 return None
+ 
+             status_resp = json.loads(status_result.stdout)
+             if "return" not in status_resp:
+                 return None
+ 
+             ret = status_resp["return"]
+             if not ret.get("exited", False):
+                 return None
+ 
+             if "out-data" in ret:
+                 return base64.b64decode(ret["out-data"]).decode().strip()
+ 
+             return ""
+ 
+         except Exception:
+             return None
+ 
+     def validate_mounts(self) -> Dict:
+         """Validate all mount points are accessible and contain data."""
+         self.console.print("\n[bold]💾 Validating Mount Points...[/]")
+ 
+         all_paths = self.config.get("paths", {}).copy()
+         all_paths.update(self.config.get("app_data_paths", {}))
+ 
+         if not all_paths:
+             self.console.print("[dim]No mount points configured[/]")
+             return self.results["mounts"]
+ 
+         # Get mounted filesystems
+         mount_output = self._exec_in_vm("mount | grep 9p")
+         mounted_paths = []
+         if mount_output:
+             mounted_paths = [line.split()[2] for line in mount_output.split('\n') if line.strip()]
+ 
+         mount_table = Table(title="Mount Validation", border_style="cyan")
+         mount_table.add_column("Guest Path", style="bold")
+         mount_table.add_column("Mounted", justify="center")
+         mount_table.add_column("Accessible", justify="center")
+         mount_table.add_column("Files", justify="right")
+ 
+         for host_path, guest_path in all_paths.items():
+             self.results["mounts"]["total"] += 1
+ 
+             # Check if mounted
+             is_mounted = any(guest_path in mp for mp in mounted_paths)
+ 
+             # Check if accessible
+             accessible = False
+             file_count = "?"
+ 
+             if is_mounted:
+                 test_result = self._exec_in_vm(f"test -d {guest_path} && echo 'yes' || echo 'no'")
+                 accessible = test_result == "yes"
+ 
+                 if accessible:
+                     # Get file count
+                     count_str = self._exec_in_vm(f"ls -A {guest_path} 2>/dev/null | wc -l")
+                     if count_str and count_str.isdigit():
+                         file_count = count_str
+ 
+             # Determine status
+             if is_mounted and accessible:
+                 mount_status = "[green]✅[/]"
+                 access_status = "[green]✅[/]"
+                 self.results["mounts"]["passed"] += 1
+                 status = "pass"
+             elif is_mounted:
+                 mount_status = "[green]✅[/]"
+                 access_status = "[red]❌[/]"
+                 self.results["mounts"]["failed"] += 1
+                 status = "mounted_but_inaccessible"
+             else:
+                 mount_status = "[red]❌[/]"
+                 access_status = "[dim]N/A[/]"
+                 self.results["mounts"]["failed"] += 1
+                 status = "not_mounted"
+ 
+             mount_table.add_row(guest_path, mount_status, access_status, str(file_count))
+ 
+             self.results["mounts"]["details"].append({
+                 "path": guest_path,
+                 "mounted": is_mounted,
+                 "accessible": accessible,
+                 "files": file_count,
+                 "status": status
+             })
+ 
+         self.console.print(mount_table)
+         self.console.print(f"[dim]{self.results['mounts']['passed']}/{self.results['mounts']['total']} mounts working[/]")
+ 
+         return self.results["mounts"]
+ 
+     def validate_packages(self) -> Dict:
+         """Validate APT packages are installed."""
+         self.console.print("\n[bold]📦 Validating APT Packages...[/]")
+ 
+         packages = self.config.get("packages", [])
+         if not packages:
+             self.console.print("[dim]No APT packages configured[/]")
+             return self.results["packages"]
+ 
+         pkg_table = Table(title="Package Validation", border_style="cyan")
+         pkg_table.add_column("Package", style="bold")
+         pkg_table.add_column("Status", justify="center")
+         pkg_table.add_column("Version", style="dim")
+ 
+         for package in packages:
+             self.results["packages"]["total"] += 1
+ 
+             # Check if installed
+             check_cmd = f"dpkg -l | grep -E '^ii {package}' | awk '{{print $3}}'"
+             version = self._exec_in_vm(check_cmd)
+ 
+             if version:
+                 pkg_table.add_row(package, "[green]✅ Installed[/]", version[:40])
+                 self.results["packages"]["passed"] += 1
+                 self.results["packages"]["details"].append({
+                     "package": package,
+                     "installed": True,
+                     "version": version
+                 })
+             else:
+                 pkg_table.add_row(package, "[red]❌ Missing[/]", "")
+                 self.results["packages"]["failed"] += 1
+                 self.results["packages"]["details"].append({
+                     "package": package,
+                     "installed": False,
+                     "version": None
+                 })
+ 
+         self.console.print(pkg_table)
+         self.console.print(f"[dim]{self.results['packages']['passed']}/{self.results['packages']['total']} packages installed[/]")
+ 
+         return self.results["packages"]
+ 
+     def validate_snap_packages(self) -> Dict:
+         """Validate snap packages are installed."""
+         self.console.print("\n[bold]📦 Validating Snap Packages...[/]")
+ 
+         snap_packages = self.config.get("snap_packages", [])
+         if not snap_packages:
+             self.console.print("[dim]No snap packages configured[/]")
+             return self.results["snap_packages"]
+ 
+         snap_table = Table(title="Snap Package Validation", border_style="cyan")
+         snap_table.add_column("Package", style="bold")
+         snap_table.add_column("Status", justify="center")
+         snap_table.add_column("Version", style="dim")
+ 
+         for package in snap_packages:
+             self.results["snap_packages"]["total"] += 1
+ 
+             # Check if installed
+             check_cmd = f"snap list | grep '^{package}' | awk '{{print $2}}'"
+             version = self._exec_in_vm(check_cmd)
+ 
+             if version:
+                 snap_table.add_row(package, "[green]✅ Installed[/]", version[:40])
+                 self.results["snap_packages"]["passed"] += 1
+                 self.results["snap_packages"]["details"].append({
+                     "package": package,
+                     "installed": True,
+                     "version": version
+                 })
+             else:
+                 snap_table.add_row(package, "[red]❌ Missing[/]", "")
+                 self.results["snap_packages"]["failed"] += 1
+                 self.results["snap_packages"]["details"].append({
+                     "package": package,
+                     "installed": False,
+                     "version": None
+                 })
+ 
+         self.console.print(snap_table)
+         self.console.print(f"[dim]{self.results['snap_packages']['passed']}/{self.results['snap_packages']['total']} snap packages installed[/]")
+ 
+         return self.results["snap_packages"]
+ 
+     def validate_services(self) -> Dict:
+         """Validate services are enabled and running."""
+         self.console.print("\n[bold]⚙️ Validating Services...[/]")
+ 
+         services = self.config.get("services", [])
+         if not services:
+             self.console.print("[dim]No services configured[/]")
+             return self.results["services"]
+ 
+         svc_table = Table(title="Service Validation", border_style="cyan")
+         svc_table.add_column("Service", style="bold")
+         svc_table.add_column("Enabled", justify="center")
+         svc_table.add_column("Running", justify="center")
+ 
+         for service in services:
+             self.results["services"]["total"] += 1
+ 
+             # Check if enabled
+             enabled_cmd = f"systemctl is-enabled {service} 2>/dev/null"
+             enabled_status = self._exec_in_vm(enabled_cmd)
+             is_enabled = enabled_status == "enabled"
+ 
+             # Check if running
+             running_cmd = f"systemctl is-active {service} 2>/dev/null"
+             running_status = self._exec_in_vm(running_cmd)
+             is_running = running_status == "active"
+ 
+             enabled_icon = "[green]✅[/]" if is_enabled else "[yellow]⚠️[/]"
+             running_icon = "[green]✅[/]" if is_running else "[red]❌[/]"
+ 
+             svc_table.add_row(service, enabled_icon, running_icon)
+ 
+             if is_enabled and is_running:
+                 self.results["services"]["passed"] += 1
+             else:
+                 self.results["services"]["failed"] += 1
+ 
+             self.results["services"]["details"].append({
+                 "service": service,
+                 "enabled": is_enabled,
+                 "running": is_running
+             })
+ 
+         self.console.print(svc_table)
+         self.console.print(f"[dim]{self.results['services']['passed']}/{self.results['services']['total']} services active[/]")
+ 
+         return self.results["services"]
+ 
+     def validate_all(self) -> Dict:
+         """Run all validations and return comprehensive results."""
+         self.console.print("[bold cyan]🔍 Running Full Validation...[/]")
+ 
+         # Check if VM is running
+         try:
+             result = subprocess.run(
+                 ["virsh", "--connect", self.conn_uri, "domstate", self.vm_name],
+                 capture_output=True, text=True, timeout=5
+             )
+             vm_state = result.stdout.strip()
+ 
+             if "running" not in vm_state.lower():
+                 self.console.print(f"[yellow]⚠️ VM is not running (state: {vm_state})[/]")
+                 self.console.print("[dim]Start VM with: clonebox start .[/]")
+                 self.results["overall"] = "vm_not_running"
+                 return self.results
+         except Exception as e:
+             self.console.print(f"[red]❌ Cannot check VM state: {e}[/]")
+             self.results["overall"] = "error"
+             return self.results
+ 
+         # Run all validations
+         self.validate_mounts()
+         self.validate_packages()
+         self.validate_snap_packages()
+         self.validate_services()
+ 
+         # Calculate overall status
+         total_checks = (
+             self.results["mounts"]["total"] +
+             self.results["packages"]["total"] +
+             self.results["snap_packages"]["total"] +
+             self.results["services"]["total"]
+         )
+ 
+         total_passed = (
+             self.results["mounts"]["passed"] +
+             self.results["packages"]["passed"] +
+             self.results["snap_packages"]["passed"] +
+             self.results["services"]["passed"]
+         )
+ 
+         total_failed = (
+             self.results["mounts"]["failed"] +
+             self.results["packages"]["failed"] +
+             self.results["snap_packages"]["failed"] +
+             self.results["services"]["failed"]
+         )
+ 
+         # Print summary
+         self.console.print("\n[bold]📊 Validation Summary[/]")
+         summary_table = Table(border_style="cyan")
+         summary_table.add_column("Category", style="bold")
+         summary_table.add_column("Passed", justify="right", style="green")
+         summary_table.add_column("Failed", justify="right", style="red")
+         summary_table.add_column("Total", justify="right")
+ 
+         summary_table.add_row("Mounts", str(self.results["mounts"]["passed"]),
+                               str(self.results["mounts"]["failed"]),
+                               str(self.results["mounts"]["total"]))
+         summary_table.add_row("APT Packages", str(self.results["packages"]["passed"]),
+                               str(self.results["packages"]["failed"]),
+                               str(self.results["packages"]["total"]))
+         summary_table.add_row("Snap Packages", str(self.results["snap_packages"]["passed"]),
+                               str(self.results["snap_packages"]["failed"]),
+                               str(self.results["snap_packages"]["total"]))
+         summary_table.add_row("Services", str(self.results["services"]["passed"]),
+                               str(self.results["services"]["failed"]),
+                               str(self.results["services"]["total"]))
+         summary_table.add_row("[bold]TOTAL", f"[bold green]{total_passed}",
+                               f"[bold red]{total_failed}", f"[bold]{total_checks}")
+ 
+         self.console.print(summary_table)
+ 
+         # Determine overall status
+         if total_failed == 0 and total_checks > 0:
+             self.results["overall"] = "pass"
+             self.console.print("\n[bold green]✅ All validations passed![/]")
+         elif total_failed > 0:
+             self.results["overall"] = "partial"
+             self.console.print(f"\n[bold yellow]⚠️ {total_failed}/{total_checks} checks failed[/]")
+             self.console.print("[dim]Consider rebuilding VM: clonebox clone . --user --run --replace[/]")
+         else:
+             self.results["overall"] = "no_checks"
+             self.console.print("\n[dim]No validation checks configured[/]")
+ 
+         return self.results
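`_exec_in_vm` wraps the QEMU guest agent's two-step exec protocol: `guest-exec` starts the command and returns a pid, then `guest-exec-status` is polled for the base64-encoded output. The same round trip can be driven by hand with virsh (VM name and pid are illustrative):

```bash
# Step 1: start the command in the guest; the reply carries a pid
virsh --connect qemu:///session qemu-agent-command clone-clonebox \
  '{"execute":"guest-exec","arguments":{"path":"/bin/sh","arg":["-c","mount | grep 9p"],"capture-output":true}}'
# {"return":{"pid":1234}}

# Step 2: fetch the result; "out-data" holds base64-encoded stdout
virsh --connect qemu:///session qemu-agent-command clone-clonebox \
  '{"execute":"guest-exec-status","arguments":{"pid":1234}}'

# Decode the captured output
echo '<out-data value>' | base64 -d
```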
clonebox-0.1.12.dist-info/METADATA → clonebox-0.1.14.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: clonebox
- Version: 0.1.12
+ Version: 0.1.14
  Summary: Clone your workstation environment to an isolated VM with selective apps, paths and services
  Author: CloneBox Team
  License: Apache-2.0
@@ -235,6 +235,9 @@ clonebox open . --user
 
  # 6. Stop VM when done
  clonebox stop . --user
+ 
+ # 7. Delete VM if needed
+ clonebox delete . --user --yes
  ```
 
  ### Development Environment with Browser Profiles
@@ -273,24 +276,33 @@ clonebox test . --user --verbose
  # ✅ Health check triggered
  ```
 
- ### VM Health Monitoring
+ ### VM Health Monitoring and Mount Validation
 
  ```bash
- # Check overall status
+ # Check overall status including mount validation
  clonebox status . --user
 
- # Output:
- # 📊 Checking VM status: clone-clonebox
- # VM State: running
- # VM has network access
- # ☁️ Cloud-init: Still running (packages installing)
- # 🏥 Health Check Status... ⏳ Health check not yet run
-
- # Trigger health check
+ # Output shows:
+ # 📊 VM State: running
+ # 🔍 Network and IP address
+ # ☁️ Cloud-init: Complete
+ # 💾 Mount Points status table:
+ #   ┌─────────────────────────┬────────────────┬───────┐
+ #   │ Guest Path              │ Status         │ Files │
+ #   ├─────────────────────────┼────────────────┼───────┤
+ #   │ /home/ubuntu/Downloads  │ ✅ Mounted     │   199 │
+ #   │ /home/ubuntu/Documents  │ ❌ Not mounted │     ? │
+ #   │ ~/.config/JetBrains     │ ✅ Mounted     │    45 │
+ #   └─────────────────────────┴────────────────┴───────┘
+ # 12/14 mounts active
+ # 🏥 Health Check Status: OK
+ 
+ # Trigger full health check
  clonebox status . --user --health
 
- # View detailed health report in VM:
- # cat /var/log/clonebox-health.log
+ # If mounts are missing, remount or rebuild:
+ # In VM: sudo mount -a
+ # Or rebuild: clonebox clone . --user --run --replace
  ```
 
  ### Export/Import Workflow
@@ -578,15 +590,17 @@ clonebox clone . --network auto
  | `clonebox start .` | Start VM from `.clonebox.yaml` in current dir |
  | `clonebox start . --viewer` | Start VM and open GUI window |
  | `clonebox start <name>` | Start existing VM by name |
- | `clonebox stop <name>` | Stop a VM (graceful shutdown) |
- | `clonebox stop -f <name>` | Force stop a VM |
- | `clonebox delete <name>` | Delete VM and storage |
+ | `clonebox stop .` | Stop VM from `.clonebox.yaml` in current dir |
+ | `clonebox stop . -f` | Force stop VM |
+ | `clonebox delete .` | Delete VM from `.clonebox.yaml` in current dir |
+ | `clonebox delete . --yes` | Delete VM without confirmation |
  | `clonebox list` | List all VMs |
  | `clonebox detect` | Show detected services/apps/paths |
  | `clonebox detect --yaml` | Output as YAML config |
  | `clonebox detect --yaml --dedupe` | YAML with duplicates removed |
  | `clonebox detect --json` | Output as JSON |
- | `clonebox status . --user` | Check VM health, cloud-init status, and IP address |
+ | `clonebox status . --user` | Check VM health, cloud-init, IP, and mount status |
+ | `clonebox status . --user --health` | Check VM status and run full health check |
  | `clonebox test . --user` | Test VM configuration and validate all settings |
  | `clonebox export . --user` | Export VM for migration to another workstation |
  | `clonebox export . --user --include-data` | Export VM with browser profiles and configs |
@@ -665,22 +679,57 @@ sudo apt install virt-viewer
  virt-viewer --connect qemu:///session <vm-name>
  ```
 
- ### Browser Profiles Not Syncing
+ ### Browser Profiles and PyCharm Not Working
 
- If browser profiles or app data aren't available:
+ If browser profiles or PyCharm configs aren't available, or you get permission errors:
 
- 1. **Regenerate config with app data:**
-    ```bash
-    rm .clonebox.yaml
-    clonebox clone . --user --run --replace
-    ```
+ **Root cause:** VM was created with old version without proper mount permissions.
 
- 2. **Check mount permissions in VM:**
-    ```bash
-    # Verify mounts are accessible
-    ls -la ~/.config/google-chrome
-    ls -la ~/.mozilla/firefox
-    ```
+ **Solution - Rebuild VM with latest fixes:**
+ 
+ ```bash
+ # Stop and delete old VM
+ clonebox stop . --user
+ clonebox delete . --user --yes
+ 
+ # Recreate VM with fixed permissions and app data mounts
+ clonebox clone . --user --run --replace
+ ```
+ 
+ **After rebuild, verify mounts in VM:**
+ ```bash
+ # Check all mounts are accessible
+ ls ~/.config/google-chrome   # Chrome profile
+ ls ~/.mozilla/firefox        # Firefox profile
+ ls ~/.config/JetBrains       # PyCharm settings
+ ls ~/Downloads               # Downloads folder
+ ls ~/Documents               # Documents folder
+ ```
+ 
+ **What changed in v0.1.12:**
+ - All mounts use `uid=1000,gid=1000` for ubuntu user access
+ - Both `paths` and `app_data_paths` are properly mounted
+ - No sudo needed to access any shared directories
+ 
+ ### Mount Points Empty or Permission Denied
+ 
+ If you get "must be superuser to use mount" error when accessing Downloads/Documents:
+ 
+ **Solution:** VM was created with old mount configuration. Recreate VM:
+ 
+ ```bash
+ # Stop and delete old VM
+ clonebox stop . --user
+ clonebox delete . --user --yes
+ 
+ # Recreate with fixed permissions
+ clonebox clone . --user --run --replace
+ ```
+ 
+ **What was fixed:**
+ - Mounts now use `uid=1000,gid=1000` so ubuntu user has access
+ - No need for sudo to access shared directories
+ - Applies to new VMs created after v0.1.12
 
  ### Mount Points Empty After Reboot
 
@@ -698,7 +747,7 @@ If shared directories appear empty after VM restart:
 
  3. **Verify access mode:**
     - VMs created with `accessmode="mapped"` allow any user to access mounts
-    - Older VMs used `accessmode="passthrough"` which preserves host UIDs
+    - Mount options include `uid=1000,gid=1000` for user access
 
  ## Advanced Usage
 
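A note on the troubleshooting entries above: after a rebuild, the new mount options are easy to confirm from inside the guest, using the same `mount | grep 9p` check that `clonebox status` runs (sample output illustrative):

```bash
# Every 9p share should list uid=1000,gid=1000 among its options
mount | grep 9p
# mount0 on /home/ubuntu/Downloads type 9p (rw,relatime,...,uid=1000,gid=1000)

# The persisted fstab entries carry the same options plus nofail
grep 9p /etc/fstab
```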
@@ -761,6 +810,160 @@ virsh --connect qemu:///session console clone-clonebox
  # Press Ctrl + ] to exit console
  ```
 
+ ## Exporting to Proxmox
+ 
+ To use CloneBox VMs in Proxmox, you need to convert the qcow2 disk image to Proxmox format.
+ 
+ ### Step 1: Locate VM Disk Image
+ 
+ ```bash
+ # Find VM disk location
+ clonebox list
+ 
+ # Check VM details for disk path
+ virsh --connect qemu:///session dominfo clone-clonebox
+ 
+ # Typical locations:
+ # User session: ~/.local/share/libvirt/images/<vm-name>/<vm-name>.qcow2
+ # System session: /var/lib/libvirt/images/<vm-name>/<vm-name>.qcow2
+ ```
+ 
+ ### Step 2: Export VM with CloneBox
+ 
+ ```bash
+ # Export VM with all data (from current directory with .clonebox.yaml)
+ clonebox export . --user --include-data -o clonebox-vm.tar.gz
+ 
+ # Or export specific VM by name
+ clonebox export safetytwin-vm --include-data -o safetytwin.tar.gz
+ 
+ # Extract to get the disk image
+ tar -xzf clonebox-vm.tar.gz
+ cd clonebox-clonebox
+ ls -la  # Should show disk.qcow2, vm.xml, etc.
+ ```
+ 
+ ### Step 3: Convert to Proxmox Format
+ 
+ ```bash
+ # Install qemu-utils if not installed
+ sudo apt install qemu-utils
+ 
+ # Convert qcow2 to raw format (Proxmox preferred)
+ qemu-img convert -f qcow2 -O raw disk.qcow2 vm-disk.raw
+ 
+ # Or convert to qcow2 with compression for smaller size
+ qemu-img convert -f qcow2 -O qcow2 -c disk.qcow2 vm-disk-compressed.qcow2
+ ```
+ 
+ ### Step 4: Transfer to Proxmox Host
+ 
+ ```bash
+ # Using scp (replace with your Proxmox host IP)
+ scp vm-disk.raw root@proxmox:/var/lib/vz/template/iso/
+ 
+ # Or using rsync for large files
+ rsync -avh --progress vm-disk.raw root@proxmox:/var/lib/vz/template/iso/
+ ```
+ 
+ ### Step 5: Create VM in Proxmox
+ 
+ 1. **Log into Proxmox Web UI**
+ 
+ 2. **Create new VM:**
+    - Click "Create VM"
+    - Enter VM ID and Name
+    - Set OS: "Do not use any media"
+ 
+ 3. **Configure Hardware:**
+    - **Hard Disk:**
+      - Delete default disk
+      - Click "Add" → "Hard Disk"
+      - Select your uploaded image file
+      - Set Disk size (can be larger than image)
+      - Set Bus: "VirtIO SCSI"
+      - Set Cache: "Write back" for better performance
+ 
+ 4. **CPU & Memory:**
+    - Set CPU cores (match original VM config)
+    - Set Memory (match original VM config)
+ 
+ 5. **Network:**
+    - Set Model: "VirtIO (paravirtualized)"
+ 
+ 6. **Confirm:** Click "Finish" to create VM
+ 
+ ### Step 6: Post-Import Configuration
+ 
+ 1. **Start the VM in Proxmox**
+ 
+ 2. **Update network configuration:**
+    ```bash
+    # In VM console, update network interfaces
+    sudo nano /etc/netplan/01-netcfg.yaml
+ 
+    # Example for Proxmox bridge:
+    network:
+      version: 2
+      renderer: networkd
+      ethernets:
+        ens18:  # Proxmox typically uses ens18
+          dhcp4: true
+    ```
+ 
+ 3. **Apply network changes:**
+    ```bash
+    sudo netplan apply
+    ```
+ 
+ 4. **Update mount points (if needed):**
+    ```bash
+    # Mount points will fail in Proxmox, remove them
+    sudo nano /etc/fstab
+    # Comment out or remove 9p mount entries
+ 
+    # Reboot to apply changes
+    sudo reboot
+    ```
+ 
+ ### Alternative: Direct Import to Proxmox Storage
+ 
+ If you have Proxmox with shared storage:
+ 
+ ```bash
+ # On Proxmox host
+ # Create a temporary directory
+ mkdir /tmp/import
+ 
+ # Copy disk directly to Proxmox storage (example for local-lvm)
+ scp vm-disk.raw root@proxmox:/tmp/import/
+ 
+ # On Proxmox host, create VM using CLI
+ qm create 9000 --name clonebox-vm --memory 4096 --cores 4 --net0 virtio,bridge=vmbr0
+ 
+ # Import disk to VM
+ qm importdisk 9000 /tmp/import/vm-disk.raw local-lvm
+ 
+ # Attach disk to VM
+ qm set 9000 --scsihw virtio-scsi-pci --scsi0 local-lvm:vm-9000-disk-0
+ 
+ # Set boot disk
+ qm set 9000 --boot c --bootdisk scsi0
+ ```
+ 
+ ### Troubleshooting
+ 
+ - **VM won't boot:** Check if disk format is compatible (raw is safest)
+ - **Network not working:** Update network configuration for Proxmox's NIC naming
+ - **Performance issues:** Use VirtIO drivers and set cache to "Write back"
+ - **Mount errors:** Remove 9p mount entries from /etc/fstab as they won't work in Proxmox
+ 
+ ### Notes
+ 
+ - CloneBox's bind mounts (9p filesystem) are specific to libvirt/QEMU and won't work in Proxmox
+ - Browser profiles and app data exported with `--include-data` will be available in the VM disk
+ - For shared folders in Proxmox, use Proxmox's shared folders or network shares instead
+ 
  ## License
 
  MIT License - see [LICENSE](LICENSE) file.
clonebox-0.1.14.dist-info/RECORD ADDED
@@ -0,0 +1,12 @@
+ clonebox/__init__.py,sha256=C1J7Uwrp8H9Zopo5JgrQYzXg-PWls1JdqmE_0Qp1Tro,408
+ clonebox/__main__.py,sha256=Fcoyzwwyz5-eC_sBlQk5a5RbKx8uodQz5sKJ190U0NU,135
+ clonebox/cli.py,sha256=IWTJjC5o3GPDZqQVnWNa8SHq1zKaK-fLGRASTehGe3Y,84733
+ clonebox/cloner.py,sha256=fVfphsPbsqW4ASnv4bkrDIL8Ks9aPUvxx-IOO_d2FTw,32102
+ clonebox/detector.py,sha256=4fu04Ty6KC82WkcJZ5UL5TqXpWYE7Kb7R0uJ-9dtbCk,21635
+ clonebox/validator.py,sha256=8HV3ahfiLkFDOH4UOmZr7-fGfhKep1Jlw1joJeWSaQE,15858
+ clonebox-0.1.14.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ clonebox-0.1.14.dist-info/METADATA,sha256=RGcUEs9xELSw6zii3s6qGqoYsiaOQNVo-CB5xK9N7Vw,30824
+ clonebox-0.1.14.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ clonebox-0.1.14.dist-info/entry_points.txt,sha256=FES95Vi3btfViLEEoHdb8nikNxTqzaooi9ehZw9ZfWI,47
+ clonebox-0.1.14.dist-info/top_level.txt,sha256=LdMo2cvCrEcRGH2M8JgQNVsCoszLV0xug6kx1JnaRjo,9
+ clonebox-0.1.14.dist-info/RECORD,,
@@ -1,11 +0,0 @@
1
- clonebox/__init__.py,sha256=J9nobWleYOk3tqbPsL6g-f_F7B3LwJTjFtnQ6qwmi8Y,408
2
- clonebox/__main__.py,sha256=Fcoyzwwyz5-eC_sBlQk5a5RbKx8uodQz5sKJ190U0NU,135
3
- clonebox/cli.py,sha256=NFScoojeI1XJ982SuNt01iW52hHIYSttGy2UzZuJCCQ,76413
4
- clonebox/cloner.py,sha256=0puM04SzifccPfIVqc2CXFFcdNLWKpbiXXbBplrm9s8,31850
5
- clonebox/detector.py,sha256=4fu04Ty6KC82WkcJZ5UL5TqXpWYE7Kb7R0uJ-9dtbCk,21635
6
- clonebox-0.1.12.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
7
- clonebox-0.1.12.dist-info/METADATA,sha256=SJiYq8FYnqDhVcwZ00GeOTGNeAMjoqw-_e5e9XgMePo,24526
8
- clonebox-0.1.12.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
9
- clonebox-0.1.12.dist-info/entry_points.txt,sha256=FES95Vi3btfViLEEoHdb8nikNxTqzaooi9ehZw9ZfWI,47
10
- clonebox-0.1.12.dist-info/top_level.txt,sha256=LdMo2cvCrEcRGH2M8JgQNVsCoszLV0xug6kx1JnaRjo,9
11
- clonebox-0.1.12.dist-info/RECORD,,