clonebox 0.1.13__tar.gz → 0.1.14__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: clonebox
3
- Version: 0.1.13
3
+ Version: 0.1.14
4
4
  Summary: Clone your workstation environment to an isolated VM with selective apps, paths and services
5
5
  Author: CloneBox Team
6
6
  License: Apache-2.0
@@ -276,24 +276,33 @@ clonebox test . --user --verbose
276
276
  # ✅ Health check triggered
277
277
  ```
278
278
 
279
- ### VM Health Monitoring
279
+ ### VM Health Monitoring and Mount Validation
280
280
 
281
281
  ```bash
282
- # Check overall status
282
+ # Check overall status including mount validation
283
283
  clonebox status . --user
284
284
 
285
- # Output:
286
- # 📊 Checking VM status: clone-clonebox
287
- # VM State: running
288
- # VM has network access
289
- # ☁️ Cloud-init: Still running (packages installing)
290
- # 🏥 Health Check Status... ⏳ Health check not yet run
291
-
292
- # Trigger health check
285
+ # Output shows:
286
+ # 📊 VM State: running
287
+ # 🔍 Network and IP address
288
+ # ☁️ Cloud-init: Complete
289
+ # 💾 Mount Points status table:
290
+ # ┌─────────────────────────┬──────────────┬────────┐
291
+ # │ Guest Path │ Status │ Files │
292
+ # ├─────────────────────────┼──────────────┼────────┤
293
+ # │ /home/ubuntu/Downloads │ ✅ Mounted │ 199 │
294
+ # │ /home/ubuntu/Documents │ ❌ Not mounted│ ? │
295
+ # │ ~/.config/JetBrains │ ✅ Mounted │ 45 │
296
+ # └─────────────────────────┴──────────────┴────────┘
297
+ # 12/14 mounts active
298
+ # 🏥 Health Check Status: OK
299
+
300
+ # Trigger full health check
293
301
  clonebox status . --user --health
294
302
 
295
- # View detailed health report in VM:
296
- # cat /var/log/clonebox-health.log
303
+ # If mounts are missing, remount or rebuild:
304
+ # In VM: sudo mount -a
305
+ # Or rebuild: clonebox clone . --user --run --replace
297
306
  ```
298
307
 
299
308
  ### Export/Import Workflow
@@ -590,7 +599,8 @@ clonebox clone . --network auto
590
599
  | `clonebox detect --yaml` | Output as YAML config |
591
600
  | `clonebox detect --yaml --dedupe` | YAML with duplicates removed |
592
601
  | `clonebox detect --json` | Output as JSON |
593
- | `clonebox status . --user` | Check VM health, cloud-init status, and IP address |
602
+ | `clonebox status . --user` | Check VM health, cloud-init, IP, and mount status |
603
+ | `clonebox status . --user --health` | Check VM status and run full health check |
594
604
  | `clonebox test . --user` | Test VM configuration and validate all settings |
595
605
  | `clonebox export . --user` | Export VM for migration to another workstation |
596
606
  | `clonebox export . --user --include-data` | Export VM with browser profiles and configs |
@@ -237,24 +237,33 @@ clonebox test . --user --verbose
237
237
  # ✅ Health check triggered
238
238
  ```
239
239
 
240
- ### VM Health Monitoring
240
+ ### VM Health Monitoring and Mount Validation
241
241
 
242
242
  ```bash
243
- # Check overall status
243
+ # Check overall status including mount validation
244
244
  clonebox status . --user
245
245
 
246
- # Output:
247
- # 📊 Checking VM status: clone-clonebox
248
- # VM State: running
249
- # VM has network access
250
- # ☁️ Cloud-init: Still running (packages installing)
251
- # 🏥 Health Check Status... ⏳ Health check not yet run
252
-
253
- # Trigger health check
246
+ # Output shows:
247
+ # 📊 VM State: running
248
+ # 🔍 Network and IP address
249
+ # ☁️ Cloud-init: Complete
250
+ # 💾 Mount Points status table:
251
+ # ┌─────────────────────────┬──────────────┬────────┐
252
+ # │ Guest Path │ Status │ Files │
253
+ # ├─────────────────────────┼──────────────┼────────┤
254
+ # │ /home/ubuntu/Downloads │ ✅ Mounted │ 199 │
255
+ # │ /home/ubuntu/Documents │ ❌ Not mounted│ ? │
256
+ # │ ~/.config/JetBrains │ ✅ Mounted │ 45 │
257
+ # └─────────────────────────┴──────────────┴────────┘
258
+ # 12/14 mounts active
259
+ # 🏥 Health Check Status: OK
260
+
261
+ # Trigger full health check
254
262
  clonebox status . --user --health
255
263
 
256
- # View detailed health report in VM:
257
- # cat /var/log/clonebox-health.log
264
+ # If mounts are missing, remount or rebuild:
265
+ # In VM: sudo mount -a
266
+ # Or rebuild: clonebox clone . --user --run --replace
258
267
  ```
259
268
 
260
269
  ### Export/Import Workflow
@@ -551,7 +560,8 @@ clonebox clone . --network auto
551
560
  | `clonebox detect --yaml` | Output as YAML config |
552
561
  | `clonebox detect --yaml --dedupe` | YAML with duplicates removed |
553
562
  | `clonebox detect --json` | Output as JSON |
554
- | `clonebox status . --user` | Check VM health, cloud-init status, and IP address |
563
+ | `clonebox status . --user` | Check VM health, cloud-init, IP, and mount status |
564
+ | `clonebox status . --user --health` | Check VM status and run full health check |
555
565
  | `clonebox test . --user` | Test VM configuration and validate all settings |
556
566
  | `clonebox export . --user` | Export VM for migration to another workstation |
557
567
  | `clonebox export . --user --include-data` | Export VM with browser profiles and configs |
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "clonebox"
7
- version = "0.1.13"
7
+ version = "0.1.14"
8
8
  description = "Clone your workstation environment to an isolated VM with selective apps, paths and services"
9
9
  readme = "README.md"
10
10
  license = {text = "Apache-2.0"}
@@ -672,6 +672,109 @@ def cmd_status(args):
672
672
  except Exception:
673
673
  console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU agent may not be ready)[/]")
674
674
 
675
+ # Check mount status
676
+ console.print("\n[bold]💾 Checking mount status...[/]")
677
+ try:
678
+ # Load config to get expected mounts
679
+ config_file = Path.cwd() / ".clonebox.yaml"
680
+ if config_file.exists():
681
+ config = load_clonebox_config(config_file)
682
+ all_paths = config.get("paths", {}).copy()
683
+ all_paths.update(config.get("app_data_paths", {}))
684
+
685
+ if all_paths:
686
+ # Check which mounts are active
687
+ result = subprocess.run(
688
+ ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
689
+ '{"execute":"guest-exec","arguments":{"path":"/bin/sh","arg":["-c","mount | grep 9p"],"capture-output":true}}'],
690
+ capture_output=True, text=True, timeout=10
691
+ )
692
+
693
+ mount_table = Table(title="Mount Points", border_style="cyan", show_header=True)
694
+ mount_table.add_column("Guest Path", style="bold")
695
+ mount_table.add_column("Status", justify="center")
696
+ mount_table.add_column("Files", justify="right")
697
+
698
+ mounted_paths = []
699
+ if result.returncode == 0 and "return" in result.stdout:
700
+ # Parse guest-exec response for mount output
701
+ import json
702
+ try:
703
+ resp = json.loads(result.stdout)
704
+ if "return" in resp and "pid" in resp["return"]:
705
+ # Get the output from guest-exec-status
706
+ pid = resp["return"]["pid"]
707
+ status_result = subprocess.run(
708
+ ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
709
+ f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
710
+ capture_output=True, text=True, timeout=5
711
+ )
712
+ if status_result.returncode == 0:
713
+ status_resp = json.loads(status_result.stdout)
714
+ if "return" in status_resp and "out-data" in status_resp["return"]:
715
+ import base64
716
+ mount_output = base64.b64decode(status_resp["return"]["out-data"]).decode()
717
+ mounted_paths = [line.split()[2] for line in mount_output.split('\n') if line.strip()]
718
+ except:
719
+ pass
720
+
721
+ # Check each expected mount
722
+ working_mounts = 0
723
+ total_mounts = 0
724
+ for host_path, guest_path in all_paths.items():
725
+ total_mounts += 1
726
+ is_mounted = any(guest_path in mp for mp in mounted_paths)
727
+
728
+ # Try to get file count
729
+ file_count = "?"
730
+ if is_mounted:
731
+ try:
732
+ count_result = subprocess.run(
733
+ ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
734
+ f'{{"execute":"guest-exec","arguments":{{"path":"/bin/sh","arg":["-c","ls -A {guest_path} 2>/dev/null | wc -l"],"capture-output":true}}}}'],
735
+ capture_output=True, text=True, timeout=5
736
+ )
737
+ if count_result.returncode == 0:
738
+ resp = json.loads(count_result.stdout)
739
+ if "return" in resp and "pid" in resp["return"]:
740
+ pid = resp["return"]["pid"]
741
+ import time
742
+ time.sleep(0.5)
743
+ status_result = subprocess.run(
744
+ ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
745
+ f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
746
+ capture_output=True, text=True, timeout=5
747
+ )
748
+ if status_result.returncode == 0:
749
+ status_resp = json.loads(status_result.stdout)
750
+ if "return" in status_resp and "out-data" in status_resp["return"]:
751
+ file_count = base64.b64decode(status_resp["return"]["out-data"]).decode().strip()
752
+ except:
753
+ pass
754
+
755
+ if is_mounted:
756
+ status = "[green]✅ Mounted[/]"
757
+ working_mounts += 1
758
+ else:
759
+ status = "[red]❌ Not mounted[/]"
760
+
761
+ mount_table.add_row(guest_path, status, str(file_count))
762
+
763
+ console.print(mount_table)
764
+ console.print(f"[dim]{working_mounts}/{total_mounts} mounts active[/]")
765
+
766
+ if working_mounts < total_mounts:
767
+ console.print("[yellow]⚠️ Some mounts are missing. Try remounting in VM:[/]")
768
+ console.print("[dim] sudo mount -a[/]")
769
+ console.print("[dim]Or rebuild VM with: clonebox clone . --user --run --replace[/]")
770
+ else:
771
+ console.print("[dim]No mount points configured[/]")
772
+ else:
773
+ console.print("[dim]No .clonebox.yaml found - cannot check mounts[/]")
774
+ except Exception as e:
775
+ console.print(f"[yellow]⚠️ Cannot check mounts: {e}[/]")
776
+ console.print("[dim]QEMU guest agent may not be ready yet[/]")
777
+
675
778
  # Check health status if available
676
779
  console.print("\n[bold]🏥 Health Check Status...[/]")
677
780
  try:
@@ -1042,11 +1145,13 @@ def cmd_test(args):
1042
1145
  """Test VM configuration and health."""
1043
1146
  import subprocess
1044
1147
  import json
1148
+ from clonebox.validator import VMValidator
1045
1149
 
1046
1150
  name = args.name
1047
1151
  user_session = getattr(args, "user", False)
1048
1152
  quick = getattr(args, "quick", False)
1049
1153
  verbose = getattr(args, "verbose", False)
1154
+ validate_all = getattr(args, "validate", False)
1050
1155
  conn_uri = "qemu:///session" if user_session else "qemu:///system"
1051
1156
 
1052
1157
  # If name is a path, load config
@@ -0,0 +1,374 @@
1
+ """
2
+ VM validation module - validates VM state against YAML configuration.
3
+ """
4
+ import subprocess
5
+ import json
6
+ import base64
7
+ import time
8
+ from typing import Dict, List, Tuple, Optional
9
+ from pathlib import Path
10
+ from rich.console import Console
11
+ from rich.table import Table
12
+
13
+
14
class VMValidator:
    """Validates a running VM against the expected state from its YAML config.

    Every check runs inside the guest through the QEMU guest agent
    (``virsh qemu-agent-command``), so the VM must be running and the agent
    responsive. Per-category counters and details accumulate in
    ``self.results``; ``validate_all()`` additionally fills ``results["overall"]``.
    """

    def __init__(self, config: dict, vm_name: str, conn_uri: str, console: Console = None):
        """
        Args:
            config: Parsed .clonebox.yaml contents.
            vm_name: libvirt domain name of the VM.
            conn_uri: libvirt connection URI, e.g. "qemu:///session" or "qemu:///system".
            console: Optional rich Console; a fresh one is created when omitted.
        """
        self.config = config
        self.vm_name = vm_name
        self.conn_uri = conn_uri
        self.console = console or Console()
        self.results = {
            "mounts": {"passed": 0, "failed": 0, "total": 0, "details": []},
            "packages": {"passed": 0, "failed": 0, "total": 0, "details": []},
            "snap_packages": {"passed": 0, "failed": 0, "total": 0, "details": []},
            "services": {"passed": 0, "failed": 0, "total": 0, "details": []},
            "overall": "unknown",
        }

    def _agent_command(self, payload: dict, timeout: int) -> Optional[dict]:
        """Send one guest-agent command; return the parsed JSON reply or None."""
        try:
            proc = subprocess.run(
                ["virsh", "--connect", self.conn_uri, "qemu-agent-command",
                 self.vm_name, json.dumps(payload)],
                capture_output=True, text=True, timeout=timeout,
            )
            if proc.returncode != 0:
                return None
            return json.loads(proc.stdout)
        except Exception:
            # virsh missing, timeout, or unparseable reply — callers treat
            # "no answer" and "failed" the same way.
            return None

    def _exec_in_vm(self, command: str, timeout: int = 10) -> Optional[str]:
        """Execute a shell command in the VM using the QEMU guest agent.

        Returns stdout (stripped), "" when the command produced no output,
        or None when execution failed or did not finish within `timeout`.
        """
        # Build the request with json.dumps so quotes/backslashes inside
        # `command` are escaped correctly (raw f-string interpolation used
        # to produce invalid JSON for commands containing double quotes).
        reply = self._agent_command(
            {"execute": "guest-exec",
             "arguments": {"path": "/bin/sh",
                           "arg": ["-c", command],
                           "capture-output": True}},
            timeout,
        )
        if not reply or "pid" not in reply.get("return", {}):
            return None
        pid = reply["return"]["pid"]

        # Poll guest-exec-status until the command exits; a single fixed
        # sleep used to report None for slow-but-successful commands.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            time.sleep(0.2)
            status = self._agent_command(
                {"execute": "guest-exec-status", "arguments": {"pid": pid}}, 5)
            if status is None:
                return None
            ret = status.get("return", {})
            if ret.get("exited", False):
                if "out-data" in ret:
                    return base64.b64decode(ret["out-data"]).decode().strip()
                return ""
        return None  # command did not finish within the timeout

    def validate_mounts(self) -> Dict:
        """Validate all configured mount points are mounted and accessible."""
        self.console.print("\n[bold]💾 Validating Mount Points...[/]")

        all_paths = self.config.get("paths", {}).copy()
        all_paths.update(self.config.get("app_data_paths", {}))

        if not all_paths:
            self.console.print("[dim]No mount points configured[/]")
            return self.results["mounts"]

        # mount(8) prints "<src> on <target> type ..."; field 3 is the target.
        mount_output = self._exec_in_vm("mount | grep 9p")
        mounted_paths = []
        if mount_output:
            for line in mount_output.split("\n"):
                fields = line.split()
                if len(fields) >= 3:  # skip malformed lines instead of raising IndexError
                    mounted_paths.append(fields[2])

        table = Table(title="Mount Validation", border_style="cyan")
        table.add_column("Guest Path", style="bold")
        table.add_column("Mounted", justify="center")
        table.add_column("Accessible", justify="center")
        table.add_column("Files", justify="right")

        for host_path, guest_path in all_paths.items():
            self.results["mounts"]["total"] += 1

            is_mounted = any(guest_path in mp for mp in mounted_paths)
            accessible = False
            file_count = "?"

            if is_mounted:
                probe = self._exec_in_vm(f"test -d {guest_path} && echo 'yes' || echo 'no'")
                accessible = probe == "yes"
                if accessible:
                    count_str = self._exec_in_vm(f"ls -A {guest_path} 2>/dev/null | wc -l")
                    if count_str and count_str.isdigit():
                        file_count = count_str

            if is_mounted and accessible:
                mount_icon, access_icon, status = "[green]✅[/]", "[green]✅[/]", "pass"
                self.results["mounts"]["passed"] += 1
            elif is_mounted:
                mount_icon, access_icon, status = "[green]✅[/]", "[red]❌[/]", "mounted_but_inaccessible"
                self.results["mounts"]["failed"] += 1
            else:
                mount_icon, access_icon, status = "[red]❌[/]", "[dim]N/A[/]", "not_mounted"
                self.results["mounts"]["failed"] += 1

            table.add_row(guest_path, mount_icon, access_icon, str(file_count))
            self.results["mounts"]["details"].append({
                "path": guest_path,
                "mounted": is_mounted,
                "accessible": accessible,
                "files": file_count,
                "status": status,
            })

        self.console.print(table)
        self.console.print(f"[dim]{self.results['mounts']['passed']}/{self.results['mounts']['total']} mounts working[/]")

        return self.results["mounts"]

    def validate_packages(self) -> Dict:
        """Validate configured APT packages are installed, reporting versions."""
        self.console.print("\n[bold]📦 Validating APT Packages...[/]")

        packages = self.config.get("packages", [])
        if not packages:
            self.console.print("[dim]No APT packages configured[/]")
            return self.results["packages"]

        table = Table(title="Package Validation", border_style="cyan")
        table.add_column("Package", style="bold")
        table.add_column("Status", justify="center")
        table.add_column("Version", style="dim")

        for package in packages:
            self.results["packages"]["total"] += 1

            # dpkg -l pads columns with variable whitespace, so the old
            # pattern "^ii <pkg>" (single space) never matched. Restrict
            # dpkg -l to this package and take the version of its "ii" row.
            version = self._exec_in_vm(
                f"dpkg -l {package} 2>/dev/null | awk '/^ii/ {{print $3}}'")

            if version:
                table.add_row(package, "[green]✅ Installed[/]", version[:40])
                self.results["packages"]["passed"] += 1
                installed = True
            else:
                table.add_row(package, "[red]❌ Missing[/]", "")
                self.results["packages"]["failed"] += 1
                installed = False
            self.results["packages"]["details"].append({
                "package": package,
                "installed": installed,
                "version": version if installed else None,
            })

        self.console.print(table)
        self.console.print(f"[dim]{self.results['packages']['passed']}/{self.results['packages']['total']} packages installed[/]")

        return self.results["packages"]

    def validate_snap_packages(self) -> Dict:
        """Validate configured snap packages are installed, reporting versions."""
        self.console.print("\n[bold]📦 Validating Snap Packages...[/]")

        snap_packages = self.config.get("snap_packages", [])
        if not snap_packages:
            self.console.print("[dim]No snap packages configured[/]")
            return self.results["snap_packages"]

        table = Table(title="Snap Package Validation", border_style="cyan")
        table.add_column("Package", style="bold")
        table.add_column("Status", justify="center")
        table.add_column("Version", style="dim")

        for package in snap_packages:
            self.results["snap_packages"]["total"] += 1

            # "grep '^name'" also matched longer names sharing the prefix
            # (e.g. "go" vs "google-cloud-cli"); ask snap for the exact
            # package and read the version column of its data row.
            version = self._exec_in_vm(
                f"snap list {package} 2>/dev/null | awk 'NR==2 {{print $2}}'")

            if version:
                table.add_row(package, "[green]✅ Installed[/]", version[:40])
                self.results["snap_packages"]["passed"] += 1
                installed = True
            else:
                table.add_row(package, "[red]❌ Missing[/]", "")
                self.results["snap_packages"]["failed"] += 1
                installed = False
            self.results["snap_packages"]["details"].append({
                "package": package,
                "installed": installed,
                "version": version if installed else None,
            })

        self.console.print(table)
        self.console.print(f"[dim]{self.results['snap_packages']['passed']}/{self.results['snap_packages']['total']} snap packages installed[/]")

        return self.results["snap_packages"]

    def validate_services(self) -> Dict:
        """Validate configured services are both enabled and running."""
        self.console.print("\n[bold]⚙️ Validating Services...[/]")

        services = self.config.get("services", [])
        if not services:
            self.console.print("[dim]No services configured[/]")
            return self.results["services"]

        table = Table(title="Service Validation", border_style="cyan")
        table.add_column("Service", style="bold")
        table.add_column("Enabled", justify="center")
        table.add_column("Running", justify="center")

        for service in services:
            self.results["services"]["total"] += 1

            is_enabled = self._exec_in_vm(f"systemctl is-enabled {service} 2>/dev/null") == "enabled"
            is_running = self._exec_in_vm(f"systemctl is-active {service} 2>/dev/null") == "active"

            table.add_row(service,
                          "[green]✅[/]" if is_enabled else "[yellow]⚠️[/]",
                          "[green]✅[/]" if is_running else "[red]❌[/]")

            # A service only counts as passing when it is enabled AND active.
            if is_enabled and is_running:
                self.results["services"]["passed"] += 1
            else:
                self.results["services"]["failed"] += 1
            self.results["services"]["details"].append({
                "service": service,
                "enabled": is_enabled,
                "running": is_running,
            })

        self.console.print(table)
        self.console.print(f"[dim]{self.results['services']['passed']}/{self.results['services']['total']} services active[/]")

        return self.results["services"]

    def validate_all(self) -> Dict:
        """Run every validation category, print a summary table, return results.

        Sets results["overall"] to one of "pass", "partial", "no_checks",
        "vm_not_running" or "error".
        """
        self.console.print("[bold cyan]🔍 Running Full Validation...[/]")

        # Bail out early when the VM is not running — each guest-agent
        # command would otherwise just time out one by one.
        try:
            proc = subprocess.run(
                ["virsh", "--connect", self.conn_uri, "domstate", self.vm_name],
                capture_output=True, text=True, timeout=5,
            )
            vm_state = proc.stdout.strip()
            if "running" not in vm_state.lower():
                self.console.print(f"[yellow]⚠️ VM is not running (state: {vm_state})[/]")
                self.console.print("[dim]Start VM with: clonebox start .[/]")
                self.results["overall"] = "vm_not_running"
                return self.results
        except Exception as e:
            self.console.print(f"[red]❌ Cannot check VM state: {e}[/]")
            self.results["overall"] = "error"
            return self.results

        self.validate_mounts()
        self.validate_packages()
        self.validate_snap_packages()
        self.validate_services()

        categories = (
            ("Mounts", "mounts"),
            ("APT Packages", "packages"),
            ("Snap Packages", "snap_packages"),
            ("Services", "services"),
        )
        total_checks = sum(self.results[key]["total"] for _, key in categories)
        total_passed = sum(self.results[key]["passed"] for _, key in categories)
        total_failed = sum(self.results[key]["failed"] for _, key in categories)

        self.console.print("\n[bold]📊 Validation Summary[/]")
        summary = Table(border_style="cyan")
        summary.add_column("Category", style="bold")
        summary.add_column("Passed", justify="right", style="green")
        summary.add_column("Failed", justify="right", style="red")
        summary.add_column("Total", justify="right")
        for label, key in categories:
            summary.add_row(label,
                            str(self.results[key]["passed"]),
                            str(self.results[key]["failed"]),
                            str(self.results[key]["total"]))
        summary.add_row("[bold]TOTAL", f"[bold green]{total_passed}",
                        f"[bold red]{total_failed}", f"[bold]{total_checks}")
        self.console.print(summary)

        if total_failed == 0 and total_checks > 0:
            self.results["overall"] = "pass"
            self.console.print("\n[bold green]✅ All validations passed![/]")
        elif total_failed > 0:
            self.results["overall"] = "partial"
            self.console.print(f"\n[bold yellow]⚠️ {total_failed}/{total_checks} checks failed[/]")
            self.console.print("[dim]Consider rebuilding VM: clonebox clone . --user --run --replace[/]")
        else:
            self.results["overall"] = "no_checks"
            self.console.print("\n[dim]No validation checks configured[/]")

        return self.results
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: clonebox
3
- Version: 0.1.13
3
+ Version: 0.1.14
4
4
  Summary: Clone your workstation environment to an isolated VM with selective apps, paths and services
5
5
  Author: CloneBox Team
6
6
  License: Apache-2.0
@@ -276,24 +276,33 @@ clonebox test . --user --verbose
276
276
  # ✅ Health check triggered
277
277
  ```
278
278
 
279
- ### VM Health Monitoring
279
+ ### VM Health Monitoring and Mount Validation
280
280
 
281
281
  ```bash
282
- # Check overall status
282
+ # Check overall status including mount validation
283
283
  clonebox status . --user
284
284
 
285
- # Output:
286
- # 📊 Checking VM status: clone-clonebox
287
- # VM State: running
288
- # VM has network access
289
- # ☁️ Cloud-init: Still running (packages installing)
290
- # 🏥 Health Check Status... ⏳ Health check not yet run
291
-
292
- # Trigger health check
285
+ # Output shows:
286
+ # 📊 VM State: running
287
+ # 🔍 Network and IP address
288
+ # ☁️ Cloud-init: Complete
289
+ # 💾 Mount Points status table:
290
+ # ┌─────────────────────────┬──────────────┬────────┐
291
+ # │ Guest Path │ Status │ Files │
292
+ # ├─────────────────────────┼──────────────┼────────┤
293
+ # │ /home/ubuntu/Downloads │ ✅ Mounted │ 199 │
294
+ # │ /home/ubuntu/Documents │ ❌ Not mounted│ ? │
295
+ # │ ~/.config/JetBrains │ ✅ Mounted │ 45 │
296
+ # └─────────────────────────┴──────────────┴────────┘
297
+ # 12/14 mounts active
298
+ # 🏥 Health Check Status: OK
299
+
300
+ # Trigger full health check
293
301
  clonebox status . --user --health
294
302
 
295
- # View detailed health report in VM:
296
- # cat /var/log/clonebox-health.log
303
+ # If mounts are missing, remount or rebuild:
304
+ # In VM: sudo mount -a
305
+ # Or rebuild: clonebox clone . --user --run --replace
297
306
  ```
298
307
 
299
308
  ### Export/Import Workflow
@@ -590,7 +599,8 @@ clonebox clone . --network auto
590
599
  | `clonebox detect --yaml` | Output as YAML config |
591
600
  | `clonebox detect --yaml --dedupe` | YAML with duplicates removed |
592
601
  | `clonebox detect --json` | Output as JSON |
593
- | `clonebox status . --user` | Check VM health, cloud-init status, and IP address |
602
+ | `clonebox status . --user` | Check VM health, cloud-init, IP, and mount status |
603
+ | `clonebox status . --user --health` | Check VM status and run full health check |
594
604
  | `clonebox test . --user` | Test VM configuration and validate all settings |
595
605
  | `clonebox export . --user` | Export VM for migration to another workstation |
596
606
  | `clonebox export . --user --include-data` | Export VM with browser profiles and configs |
@@ -6,6 +6,7 @@ src/clonebox/__main__.py
6
6
  src/clonebox/cli.py
7
7
  src/clonebox/cloner.py
8
8
  src/clonebox/detector.py
9
+ src/clonebox/validator.py
9
10
  src/clonebox.egg-info/PKG-INFO
10
11
  src/clonebox.egg-info/SOURCES.txt
11
12
  src/clonebox.egg-info/dependency_links.txt
File without changes
File without changes
File without changes