clonebox 1.1.18__py3-none-any.whl → 1.1.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clonebox/backends/libvirt_backend.py +3 -1
- clonebox/cli.py +604 -557
- clonebox/cloner.py +465 -412
- clonebox/health/probes.py +14 -0
- clonebox/policies/__init__.py +13 -0
- clonebox/policies/engine.py +112 -0
- clonebox/policies/models.py +55 -0
- clonebox/policies/validators.py +26 -0
- clonebox/validator.py +93 -31
- {clonebox-1.1.18.dist-info → clonebox-1.1.20.dist-info}/METADATA +1 -1
- {clonebox-1.1.18.dist-info → clonebox-1.1.20.dist-info}/RECORD +15 -11
- {clonebox-1.1.18.dist-info → clonebox-1.1.20.dist-info}/WHEEL +0 -0
- {clonebox-1.1.18.dist-info → clonebox-1.1.20.dist-info}/entry_points.txt +0 -0
- {clonebox-1.1.18.dist-info → clonebox-1.1.20.dist-info}/licenses/LICENSE +0 -0
- {clonebox-1.1.18.dist-info → clonebox-1.1.20.dist-info}/top_level.txt +0 -0
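The most visible addition in this release is the new `clonebox/policies` package, which `cli.py` uses to gate destructive operations (delete, import with `--replace`, snapshot restore, remote delete) behind a policy check plus an explicit `--approve` flag. Below is a minimal sketch of that gating pattern as it can be read off the cli.py hunks that follow; the `PolicyEngine.load_effective`, `assert_operation_approved`, and `PolicyViolationError` names come from those hunks, while the helper itself and any detail beyond what the diff shows are illustrative assumptions (clonebox/policies/engine.py is not part of this listing).

# Hypothetical helper illustrating the policy gate added around destructive
# CLI commands in 1.1.20; only the PolicyEngine call names are taken from the
# diff below, everything else is an assumption.
import sys
from pathlib import Path
from typing import Optional

from clonebox.audit import AuditEventType
from clonebox.policies import PolicyEngine, PolicyViolationError


def gate_destructive_operation(args, event=AuditEventType.VM_DELETE,
                               start: Optional[Path] = None) -> None:
    """Exit unless the effective policy allows the operation or --approve was given."""
    policy = PolicyEngine.load_effective(start=start)  # project/global policy, may be None
    if policy is None:
        return  # no policy configured: nothing to enforce
    try:
        policy.assert_operation_approved(event.value, approved=getattr(args, "approve", False))
    except PolicyViolationError as exc:
        print(f"Policy violation: {exc}", file=sys.stderr)
        sys.exit(1)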
clonebox/cli.py
CHANGED
@@ -7,8 +7,10 @@ import argparse
 import json
 import os
 import re
+import secrets
 import sys
 import time
+from dataclasses import asdict
 from typing import Any, Dict, Optional, Tuple
 from datetime import datetime
 from pathlib import Path
@@ -37,6 +39,7 @@ from clonebox.health import HealthCheckManager, ProbeConfig, ProbeType
 from clonebox.audit import get_audit_logger, AuditQuery, AuditEventType, AuditOutcome
 from clonebox.orchestrator import Orchestrator, OrchestrationResult
 from clonebox.plugins import get_plugin_manager, PluginHook, PluginContext
+from clonebox.policies import PolicyEngine, PolicyValidationError, PolicyViolationError
 from clonebox.remote import RemoteCloner, RemoteConnection

 # Custom questionary style
@@ -263,8 +266,6 @@ def run_vm_diagnostics(
 console.print(f"[dim]{domifaddr.stdout.strip()}[/]")
 else:
 console.print("[yellow]⚠️ No interface address detected via virsh domifaddr[/]")
-if verbose and domifaddr.stderr.strip():
-console.print(f"[dim]{domifaddr.stderr.strip()}[/]")
 # Fallback: try to get IP via QEMU Guest Agent (useful for slirp/user networking)
 if guest_agent_ready:
 try:
@@ -349,7 +350,7 @@ def run_vm_diagnostics(
 if not guest_agent_ready:
 result["cloud_init"] = {"status": "unknown", "reason": "qga_not_ready"}
 console.print(
-"[yellow]⏳ Cloud-init status: Unknown (QEMU
+"[yellow]⏳ Cloud-init status: Unknown (QEMU Guest Agent not connected yet)[/]"
 )
 else:
 ready_msg = _qga_exec(
@@ -453,7 +454,7 @@ def run_vm_diagnostics(
 console.print("\n[bold]🏥 Health Check Status...[/]")
 if not guest_agent_ready:
 result["health"]["status"] = "unknown"
-console.print("[dim]Health status: Not available yet (QEMU
+console.print("[dim]Health status: Not available yet (QEMU Guest Agent not ready)[/]")
 else:
 health_status = _qga_exec(
 vm_name, conn_uri, "cat /var/log/clonebox-health-status 2>/dev/null || true", timeout=10
@@ -609,7 +610,6 @@ def cmd_logs(args):

 name = args.name
 user_session = getattr(args, "user", False)
-show_all = getattr(args, "all", False)

 try:
 vm_name, _ = _resolve_vm_name_and_config_file(name)
@@ -629,7 +629,7 @@ def cmd_logs(args):
 try:
 console.print(f"[cyan]📋 Opening logs for VM: {vm_name}[/]")
 subprocess.run(
-[str(logs_script), vm_name, "true" if user_session else "false", "true" if
+[str(logs_script), vm_name, "true" if user_session else "false", "true" if getattr(args, "all", False) else "false"],
 check=True
 )
 except subprocess.CalledProcessError as e:
@@ -942,7 +942,7 @@ def interactive_mode():
 )

 try:
-cloner = SelectiveVMCloner(user_session=
+cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))

 # Check prerequisites
 checks = cloner.check_prerequisites()
@@ -1192,13 +1192,42 @@ def cmd_delete(args):
 # If name is a path, load config
 if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
 target_path = Path(name).expanduser().resolve()
-
+
+if target_path.is_dir():
+config_file = target_path / CLONEBOX_CONFIG_FILE
+else:
+config_file = target_path
+
 if config_file.exists():
 config = load_clonebox_config(config_file)
 name = config["vm"]["name"]
 else:
 console.print(f"[red]❌ Config not found: {config_file}[/]")
 return
+elif not name or name == ".":
+config_file = Path.cwd() / ".clonebox.yaml"
+if config_file.exists():
+config = load_clonebox_config(config_file)
+name = config["vm"]["name"]
+else:
+console.print("[red]❌ No .clonebox.yaml found in current directory[/]")
+console.print("[dim]Usage: clonebox delete . or clonebox delete <vm-name>[/]")
+return
+
+policy_start = None
+if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+policy_start = Path(name).expanduser().resolve()
+
+policy = PolicyEngine.load_effective(start=policy_start)
+if policy is not None:
+try:
+policy.assert_operation_approved(
+AuditEventType.VM_DELETE.value,
+approved=getattr(args, "approve", False),
+)
+except PolicyViolationError as e:
+console.print(f"[red]❌ {e}[/]")
+sys.exit(1)

 if not args.yes:
 if not questionary.confirm(
@@ -1207,6 +1236,19 @@ def cmd_delete(args):
 console.print("[yellow]Cancelled.[/]")
 return

+cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
+delete_storage = not getattr(args, "keep_storage", False)
+console.print(f"[cyan]🗑️ Deleting VM: {name}[/]")
+try:
+ok = cloner.delete_vm(
+name, delete_storage=delete_storage, console=console, approved=getattr(args, "approve", False)
+)
+if not ok:
+sys.exit(1)
+except Exception as e:
+console.print(f"[red]❌ Failed to delete VM: {e}[/]")
+sys.exit(1)
+

 def cmd_list(args):
 """List all VMs."""
@@ -1418,7 +1460,12 @@ def cmd_export(args):
 # If name is a path, load config
 if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
 target_path = Path(name).expanduser().resolve()
-
+
+if target_path.is_dir():
+config_file = target_path / CLONEBOX_CONFIG_FILE
+else:
+config_file = target_path
+
 if config_file.exists():
 config = load_clonebox_config(config_file)
 name = config["vm"]["name"]
@@ -1649,6 +1696,16 @@ def cmd_import(args):
 f"[red]❌ VM '{vm_name}' already exists. Use --replace to overwrite.[/]"
 )
 return
+policy = PolicyEngine.load_effective(start=vm_storage)
+if policy is not None:
+try:
+policy.assert_operation_approved(
+AuditEventType.VM_DELETE.value,
+approved=getattr(args, "approve", False),
+)
+except PolicyViolationError as e:
+console.print(f"[red]❌ {e}[/]")
+sys.exit(1)
 shutil.rmtree(vm_storage)

 vm_storage.mkdir(parents=True)
@@ -1811,7 +1868,7 @@ def cmd_test(args):
 console.print()

 # Test 2: Check VM state
-
+cloud_init_running = False
 try:
 result = subprocess.run(
 ["virsh", "--connect", conn_uri, "domstate", vm_name],
@@ -1820,17 +1877,37 @@ def cmd_test(args):
 timeout=10,
 )
 state = result.stdout.strip()
+
 if state == "running":
 console.print("[green]✅ VM is running[/]")

 # Give QEMU Guest Agent some time to come up (common during early boot)
 qga_ready = _qga_ping(vm_name, conn_uri)
 if not qga_ready:
-for
+console.print("[yellow]⏳ Waiting for QEMU Guest Agent (up to 60s)...[/]")
+qga_wait_start = time.time()
+for attempt in range(12): # ~60s
 time.sleep(5)
 qga_ready = _qga_ping(vm_name, conn_uri)
+elapsed = int(time.time() - qga_wait_start)
 if qga_ready:
+console.print(f"[green]✅ QEMU Guest Agent connected after {elapsed}s[/]")
 break
+if attempt % 2 == 1:
+console.print(f"[dim] ...still waiting ({elapsed}s elapsed)[/]")
+
+if not qga_ready:
+console.print("[yellow]⚠️ QEMU Guest Agent still not connected[/]")
+
+# Check cloud-init status immediately if QGA is ready
+if qga_ready:
+console.print("[dim] Checking cloud-init status via QGA...[/]")
+status = _qga_exec(
+vm_name, conn_uri, "cloud-init status 2>/dev/null || true", timeout=15
+)
+if status and "running" in status.lower():
+cloud_init_running = True
+console.print("[yellow]⏳ Setup in progress (cloud-init is running)[/]")

 # Test network if running
 console.print("\n Checking network...")
@@ -1859,8 +1936,9 @@ def cmd_test(args):
 timeout=5,
 )
 if ip_out and ip_out.strip():
+ip_clean = ip_out.strip().replace("\n", ", ")
 console.print(
-f"[green]✅ VM has network access (IP via QGA: {
+f"[green]✅ VM has network access (IP via QGA: {ip_clean})[/]"
 )
 else:
 console.print("[yellow]⚠️ IP not available via QGA[/]")
@@ -1880,14 +1958,15 @@ def cmd_test(args):

 # Test 3: Check cloud-init status (if running)
 cloud_init_complete: Optional[bool] = None
-cloud_init_running: bool = False
 if not quick and state == "running":
 console.print("[bold]3. Cloud-init Status[/]")
 try:
 if not qga_ready:
 console.print("[yellow]⚠️ Cloud-init status unknown (QEMU Guest Agent not connected)[/]")
 else:
-status = _qga_exec(
+status = _qga_exec(
+vm_name, conn_uri, "cloud-init status 2>/dev/null || true", timeout=15
+)
 if status is None:
 console.print("[yellow]⚠️ Could not check cloud-init (QGA command failed)[/]")
 cloud_init_complete = None
@@ -1914,9 +1993,11 @@ def cmd_test(args):
 if not quick and state == "running":
 console.print("[bold]4. Mount Points Check[/]")
 paths = config.get("paths", {})
-
+copy_paths = config.get("copy_paths", None)
+if not isinstance(copy_paths, dict) or not copy_paths:
+copy_paths = config.get("app_data_paths", {})

-if paths or
+if paths or copy_paths:
 if not _qga_ping(vm_name, conn_uri):
 console.print("[yellow]⚠️ QEMU guest agent not connected - cannot verify mounts[/]")
 else:
@@ -1938,7 +2019,7 @@ def cmd_test(args):
 console.print(f"[yellow]⚠️ {guest_path} (could not check)[/]")

 # Check copied paths
-for idx, (host_path, guest_path) in enumerate(
+for idx, (host_path, guest_path) in enumerate(copy_paths.items()):
 try:
 is_accessible = _qga_exec(
 vm_name, conn_uri, f"test -d {guest_path} && echo yes || echo no", timeout=5
@@ -1965,23 +2046,14 @@ def cmd_test(args):
 console.print("[yellow]⚠️ QEMU Guest Agent not connected - cannot run health check[/]")
 else:
 exists = _qga_exec(
-vm_name,
-conn_uri,
-"test -x /usr/local/bin/clonebox-health && echo yes || echo no",
-timeout=10,
+vm_name, conn_uri, "test -x /usr/local/bin/clonebox-health && echo yes || echo no", timeout=10
 )
 if exists and exists.strip() == "yes":
 _qga_exec(
-vm_name,
-conn_uri,
-"/usr/local/bin/clonebox-health >/dev/null 2>&1 || true",
-timeout=60,
+vm_name, conn_uri, "/usr/local/bin/clonebox-health >/dev/null 2>&1 || true", timeout=60
 )
 health_status = _qga_exec(
-vm_name,
-conn_uri,
-"cat /var/log/clonebox-health-status 2>/dev/null || true",
-timeout=10,
+vm_name, conn_uri, "cat /var/log/clonebox-health-status 2>/dev/null || true", timeout=10
 )
 if health_status and "HEALTH_STATUS=OK" in health_status:
 console.print("[green]✅ Health check passed[/]")
@@ -1999,9 +2071,6 @@ def cmd_test(args):
 else:
 console.print("[yellow]⚠️ Health check status not available yet[/]")
 console.print(" View logs in VM: cat /var/log/clonebox-health.log")
-else:
-console.print("[yellow]⚠️ Health check script not found[/]")
-console.print(" This is expected until cloud-init completes")
 except Exception as e:
 console.print(f"[yellow]⚠️ Could not run health check: {e}[/]")

@@ -2009,6 +2078,8 @@ def cmd_test(args):

 # Run full validation if requested
 if validate_all and state == "running":
+console.print("[bold cyan]🔎 Starting deep validation (--validate)[/]")
+console.print("[dim]This can take a few minutes on first boot (waiting for QGA/cloud-init, checking packages/services).[/]")
 validator = VMValidator(
 config,
 vm_name,
@@ -2147,11 +2218,11 @@ def generate_clonebox_yaml(
 paths_by_type = {"project": [], "config": [], "data": []}
 for p in snapshot.paths:
 if p.type in paths_by_type:
-paths_by_type[p.type].append(p
+paths_by_type[p.type].append(p)

 if deduplicate:
 for ptype in paths_by_type:
-paths_by_type[ptype] = deduplicate_list(paths_by_type[ptype])
+paths_by_type[ptype] = deduplicate_list(paths_by_type[ptype], key=lambda x: x.path)

 # Collect working directories from running apps
 working_dirs = []
@@ -2172,7 +2243,8 @@ def generate_clonebox_yaml(
 # Build paths mapping
 paths_mapping = {}
 idx = 0
-for
+for host_path_obj in paths_by_type["project"][:5]: # Limit projects
+host_path = host_path_obj.path if hasattr(host_path_obj, 'path') else host_path_obj
 paths_mapping[host_path] = f"/mnt/project{idx}"
 idx += 1

@@ -2254,10 +2326,6 @@ def generate_clonebox_yaml(
 if deduplicate:
 all_snap_packages = deduplicate_list(all_snap_packages)

-if chrome_profile.exists() and "google-chrome" not in [d.get("app", "") for d in app_data_dirs]:
-if "chromium" not in all_snap_packages:
-all_snap_packages.append("chromium")
-
 if "pycharm-community" in all_snap_packages:
 remapped = {}
 for host_path, guest_path in app_data_mapping.items():
@@ -2315,9 +2383,9 @@ def generate_clonebox_yaml(
 for d in app_data_dirs[:15]
 ],
 "all_paths": {
-"projects":
-"configs":
-"data":
+"projects": [{"path": p.path if hasattr(p, 'path') else p, "type": p.type if hasattr(p, 'type') else 'project', "size_mb": p.size_mb if hasattr(p, 'size_mb') else 0} for p in paths_by_type["project"]],
+"configs": [{"path": p.path, "type": p.type, "size_mb": p.size_mb} for p in paths_by_type["config"][:5]],
+"data": [{"path": p.path, "type": p.type, "size_mb": p.size_mb} for p in paths_by_type["data"][:5]],
 },
 },
 }
@@ -2417,7 +2485,7 @@ def _exec_in_vm_qga(vm_name: str, conn_uri: str, command: str) -> Optional[str]:
 return None


-def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout: int =
+def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout: int = 1800):
 """Monitor cloud-init status in VM and show progress."""
 import subprocess
 import time
@@ -2429,582 +2497,454 @@ def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout:
 last_phases = []
 seen_lines = set()

-
-
-
-console=console,
-) as progress:
-task = progress.add_task("[cyan]Starting VM and initializing...", total=None)
-
-while time.time() - start_time < timeout:
-try:
-elapsed = int(time.time() - start_time)
-minutes = elapsed // 60
-seconds = elapsed % 60
-
-# Check VM state
-result = subprocess.run(
-["virsh", "--connect", conn_uri, "domstate", vm_name],
-capture_output=True,
-text=True,
-timeout=5,
-)
+refresh = 1.0
+once = False
+monitor = ResourceMonitor(conn_uri=conn_uri)

-
-
-
-
-
-
-
-
-
-
-
+try:
+with Progress(
+SpinnerColumn(),
+TextColumn("[progress.description]{task.description}"),
+console=console,
+) as progress:
+task = progress.add_task("[cyan]Starting VM and initializing...", total=None)
+
+while time.time() - start_time < timeout:
+# Clear screen for live update
+if not progress.finished:
+console.clear()
+
+console.print("[bold cyan]📊 CloneBox Resource Monitor[/]")
+console.print()
+
+# VM Stats
+vm_stats = monitor.get_all_vm_stats()
+if vm_stats:
+table = Table(title="🖥️ Virtual Machines", border_style="cyan")
+table.add_column("Name", style="bold")
+table.add_column("State")
+table.add_column("CPU %")
+table.add_column("Memory")
+table.add_column("Disk")
+table.add_column("Network I/O")
+
+for vm in vm_stats:
+state_color = "green" if vm.state == "running" else "yellow"
+cpu_color = "red" if vm.cpu_percent > 80 else "green"
+mem_pct = (
+(vm.memory_used_mb / vm.memory_total_mb * 100)
+if vm.memory_total_mb > 0
+else 0
 )
-
-
-
-
-
-
-
-
-
-
-task, description=f"[green]✓ VM ready! Total time: {minutes}m {seconds}s"
-)
-time.sleep(2)
-break
-
-# Estimate remaining time (total ~12-15 minutes for full desktop install)
-if elapsed < 60:
-remaining = "~12-15 minutes"
-elif elapsed < 300:
-remaining = f"~{12 - minutes} minutes"
-elif elapsed < 600:
-remaining = f"~{10 - minutes} minutes"
-elif elapsed < 800:
-remaining = "finishing soon..."
+mem_color = "red" if mem_pct > 80 else "green"
+
+table.add_row(
+vm.name,
+f"[{state_color}]{vm.state}[/]",
+f"[{cpu_color}]{vm.cpu_percent:.1f}%[/]",
+f"[{mem_color}]{vm.memory_used_mb}/{vm.memory_total_mb} MB[/]",
+f"{vm.disk_used_gb:.1f}/{vm.disk_total_gb:.1f} GB",
+f"↓{format_bytes(vm.network_rx_bytes)} ↑{format_bytes(vm.network_tx_bytes)}",
+)
+console.print(table)
 else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-if len(last_phases) > 2:
-last_phases = last_phases[-2:]
-
-if restart_detected:
-progress.update(
-task,
-description=f"[cyan]Finalizing setup... ({minutes}m {seconds}s, {remaining})",
-)
-elif last_phases:
-# Show the actual phase from logs
-current_status = last_phases[-1]
-progress.update(
-task,
-description=f"[cyan]{current_status} ({minutes}m {seconds}s, {remaining})",
-)
+console.print("[dim]No VMs found.[/]")
+
+console.print()
+
+# Container Stats
+container_stats = monitor.get_container_stats()
+if container_stats:
+table = Table(title="🐳 Containers", border_style="blue")
+table.add_column("Name", style="bold")
+table.add_column("State")
+table.add_column("CPU %")
+table.add_column("Memory")
+table.add_column("Network I/O")
+table.add_column("PIDs")
+
+for c in container_stats:
+cpu_color = "red" if c.cpu_percent > 80 else "green"
+mem_pct = (
+(c.memory_used_mb / c.memory_limit_mb * 100)
+if c.memory_limit_mb > 0
+else 0
+)
+mem_color = "red" if mem_pct > 80 else "green"
+
+table.add_row(
+c.name,
+f"[green]{c.state}[/]",
+f"[{cpu_color}]{c.cpu_percent:.1f}%[/]",
+f"[{mem_color}]{c.memory_used_mb}/{c.memory_limit_mb} MB[/]",
+f"↓{format_bytes(c.network_rx_bytes)} ↑{format_bytes(c.network_tx_bytes)}",
+str(c.pids),
+)
+console.print(table)
 else:
-
-task,
-description=f"[cyan]Installing packages... ({minutes}m {seconds}s, {remaining})",
-)
+console.print("[dim]No containers running.[/]")

-
-
-minutes = elapsed // 60
-seconds = elapsed % 60
-progress.update(
-task, description=f"[cyan]Configuring VM... ({minutes}m {seconds}s)"
-)
+if once:
+break

-
+console.print(f"\n[dim]Refreshing every {refresh}s. Press Ctrl+C to exit.[/]")
+time.sleep(refresh)

-
-
-
-
-)
+except KeyboardInterrupt:
+console.print("\n[yellow]Monitoring stopped.[/]")
+finally:
+monitor.close()


-def create_vm_from_config(
-
-
-user_session: bool = False,
-replace: bool = False,
-) -> str:
-"""Create VM from YAML config dict."""
-paths = config.get("paths", {})
-# Backwards compatible: v1 uses app_data_paths, newer configs may use copy_paths
-copy_paths = config.get("copy_paths", None)
-if not isinstance(copy_paths, dict) or not copy_paths:
-copy_paths = config.get("app_data_paths", {})
-
-vm_section = config.get("vm") or {}
-
-# Support both v1 (auth_method) and v2 (auth.method) config formats
-auth_section = vm_section.get("auth") or {}
-auth_method = auth_section.get("method") or vm_section.get("auth_method") or "ssh_key"
-
-# v2 config: secrets provider
-secrets_section = config.get("secrets") or {}
-secrets_provider = secrets_section.get("provider", "auto")
+def create_vm_from_config(config, start=False, user_session=False, replace=False, approved=False):
+"""Create VM from configuration dict."""
+vm_config_dict = config.get("vm", {})

-#
-limits_section = config.get("limits") or {}
-resources = {
-"memory_limit": limits_section.get("memory_limit"),
-"cpu_shares": limits_section.get("cpu_shares"),
-"disk_limit": limits_section.get("disk_limit"),
-"network_limit": limits_section.get("network_limit"),
-}
-# Remove None values
-resources = {k: v for k, v in resources.items() if v is not None}
-
+# Create VMConfig object
 vm_config = VMConfig(
-name=
-ram_mb=
-vcpus=
-disk_size_gb=
-gui=
-base_image=
-
-
+name=vm_config_dict.get("name", "clonebox-vm"),
+ram_mb=vm_config_dict.get("ram_mb", 8192),
+vcpus=vm_config_dict.get("vcpus", 4),
+disk_size_gb=vm_config_dict.get("disk_size_gb", 20),
+gui=vm_config_dict.get("gui", True),
+base_image=vm_config_dict.get("base_image"),
+network_mode=vm_config_dict.get("network_mode", "auto"),
+username=vm_config_dict.get("username", "ubuntu"),
+password=vm_config_dict.get("password", "ubuntu"),
+user_session=user_session,
+paths=config.get("paths", {}),
 packages=config.get("packages", []),
 snap_packages=config.get("snap_packages", []),
 services=config.get("services", []),
 post_commands=config.get("post_commands", []),
-
-
-username=config["vm"].get("username", "ubuntu"),
-password=config["vm"].get("password", "ubuntu"),
-auth_method=auth_method,
-ssh_public_key=vm_section.get("ssh_public_key") or auth_section.get("ssh_public_key"),
-resources=resources if resources else config["vm"].get("resources", {}),
+copy_paths=(config.get("copy_paths") or config.get("app_data_paths") or {}),
+resources=config.get("resources", {}),
 )
-
+
 cloner = SelectiveVMCloner(user_session=user_session)
-
-# Check prerequisites
+
+# Check prerequisites
 checks = cloner.check_prerequisites()
-
-
-
-
-console.print(f"
-
-
-
-
-
-
+if not all(checks.values()):
+console.print("[yellow]⚠️ Prerequisites check:[/]")
+for check, passed in checks.items():
+icon = "✅" if passed else "❌"
+console.print(f" {icon} {check}")
+
+# Create VM
+vm_uuid = cloner.create_vm(
+vm_config,
+replace=replace,
+console=console,
+approved=approved,
+)
+
 if start:
-cloner.start_vm(vm_config.name, open_viewer=
-
-# Monitor cloud-init progress if GUI is enabled
-if vm_config.gui:
-console.print("\n[bold cyan]📊 Monitoring setup progress...[/]")
-try:
-monitor_cloud_init_status(vm_config.name, user_session=user_session)
-except KeyboardInterrupt:
-console.print("\n[yellow]Monitoring stopped. VM continues setup in background.[/]")
-except Exception as e:
-console.print(
-f"\n[dim]Note: Could not monitor status ({e}). VM continues setup in background.[/]"
-)
-
+cloner.start_vm(vm_config.name, open_viewer=True, console=console)
+
 return vm_uuid


-def cmd_clone(args):
+def cmd_clone(args) -> None:
 """Generate clone config from path and optionally create VM."""
-
-
-
+from clonebox.detector import SystemDetector
+
+target_path = Path(args.path).expanduser().resolve() if args.path else Path.cwd()
+
 if not target_path.exists():
 console.print(f"[red]❌ Path does not exist: {target_path}[/]")
 return
-
-
-
-else:
-console.print(f"[bold cyan]📦 Generating clone config for: {target_path}[/]\n")
-
+
+console.print(f"[cyan]🔍 Analyzing system for cloning...[/]")
+
 # Detect system state
+detector = SystemDetector()
+
 with Progress(
 SpinnerColumn(),
 TextColumn("[progress.description]{task.description}"),
 console=console,
-transient=True,
 ) as progress:
-progress.add_task("Scanning system...", total=None)
-
+task = progress.add_task("Scanning system...", total=None)
+
+# Take snapshot
 snapshot = detector.detect_all()
-
+
+# Detect Docker containers
+containers = detector.detect_docker_containers()
+
+progress.update(task, description="Finalizing...")
+
 # Generate config
-vm_name = args.name or f"clone-{target_path.name}"
 yaml_content = generate_clonebox_yaml(
 snapshot,
 detector,
 deduplicate=args.dedupe,
-target_path=str(target_path),
-vm_name=
+target_path=str(target_path) if args.path else None,
+vm_name=args.name,
 network_mode=args.network,
-base_image=
-disk_size_gb=
+base_image=args.base_image,
+disk_size_gb=args.disk_size_gb,
 )
-
-profile_name = getattr(args, "profile", None)
-if profile_name:
-merged_config = merge_with_profile(yaml.safe_load(yaml_content), profile_name)
-if isinstance(merged_config, dict):
-vm_section = merged_config.get("vm")
-if isinstance(vm_section, dict):
-vm_packages = vm_section.pop("packages", None)
-if isinstance(vm_packages, list):
-packages = merged_config.get("packages")
-if not isinstance(packages, list):
-packages = []
-for p in vm_packages:
-if p not in packages:
-packages.append(p)
-merged_config["packages"] = packages
-
-if "container" in merged_config:
-merged_config.pop("container", None)
-
-yaml_content = yaml.dump(
-merged_config,
-default_flow_style=False,
-allow_unicode=True,
-sort_keys=False,
-)
-
-# Dry run - show what would be created and exit
-if dry_run:
-config = yaml.safe_load(yaml_content)
-console.print(
-Panel(
-f"[bold]VM Name:[/] {config['vm']['name']}\n"
-f"[bold]RAM:[/] {config['vm'].get('ram_mb', 4096)} MB\n"
-f"[bold]vCPUs:[/] {config['vm'].get('vcpus', 4)}\n"
-f"[bold]Network:[/] {config['vm'].get('network_mode', 'auto')}\n"
-f"[bold]Paths:[/] {len(config.get('paths', {}))} mounts\n"
-f"[bold]Packages:[/] {len(config.get('packages', []))} packages\n"
-f"[bold]Services:[/] {len(config.get('services', []))} services",
-title="[bold cyan]Would create VM[/]",
-border_style="cyan",
-)
-)
-console.print("\n[dim]Config preview:[/]")
-console.print(Panel(yaml_content, title="[bold].clonebox.yaml[/]", border_style="dim"))
-console.print("\n[yellow]ℹ️ Dry run complete. No changes made.[/]")
-return
-
+
 # Save config file
-config_file =
-
-
-
-
-
-
-
-
-
-
+config_file = target_path / CLONEBOX_CONFIG_FILE
+
+if config_file.exists() and not args.replace:
+console.print(f"[yellow]⚠️ Config file already exists: {config_file}[/]")
+if not questionary.confirm(
+"Overwrite existing config?", default=False, style=custom_style
+).ask():
+console.print("[dim]Cancelled.[/]")
+return
+
+with open(config_file, "w") as f:
+f.write(yaml_content)
+
+console.print(f"[green]✅ Config saved to: {config_file}[/]")
+
+# Edit if requested
 if args.edit:
 editor = os.environ.get("EDITOR", "nano")
-console.print(f"[cyan]Opening {editor}...[/]")
 os.system(f"{editor} {config_file}")
-
-
-
-# Ask to create VM
+
+# Run VM if requested
 if args.run:
-
-
-
-
-)
-
-if create_now:
-# Load config with environment variable expansion
-config = load_clonebox_config(config_file.parent)
-user_session = getattr(args, "user", False)
-
-console.print("\n[bold cyan]🔧 Creating VM...[/]\n")
-if user_session:
-console.print("[cyan]Using user session (qemu:///session) - no root required[/]")
-
-try:
-vm_uuid = create_vm_from_config(
-config,
-start=True,
-user_session=user_session,
-replace=getattr(args, "replace", False),
-)
-console.print(f"\n[bold green]🎉 VM '{config['vm']['name']}' is running![/]")
-console.print(f"[dim]UUID: {vm_uuid}[/]")
-
-# Show GUI startup info if GUI is enabled
-if config.get("vm", {}).get("gui", False):
-username = config["vm"].get("username", "ubuntu")
-password = config["vm"].get("password", "ubuntu")
-console.print("\n[bold yellow]⏰ GUI Setup Process:[/]")
-console.print(" [yellow]•[/] Installing desktop environment (~5-10 minutes)")
-console.print(" [yellow]•[/] Running health checks on all components")
-console.print(" [yellow]•[/] Automatic restart after installation")
-console.print(" [yellow]•[/] GUI login screen will appear")
-console.print(
-f" [yellow]•[/] Login: [cyan]{username}[/] / [cyan]{'*' * len(password)}[/] (from .env)"
-)
-console.print("\n[dim]💡 Progress will be monitored automatically below[/]")
-
-# Show health check info
-console.print("\n[bold]📊 Health Check (inside VM):[/]")
-console.print(" [cyan]cat /var/log/clonebox-health.log[/] # View full report")
-console.print(" [cyan]cat /var/log/clonebox-health-status[/] # Quick status")
-console.print(" [cyan]clonebox-health[/] # Re-run health check")
-
-# Show mount instructions
-paths = config.get("paths", {})
-app_data_paths = config.get("app_data_paths", {})
-
-if paths:
-console.print("\n[bold]📁 Mounted paths (shared live):[/]")
-for idx, (host, guest) in enumerate(list(paths.items())[:5]):
-console.print(f" [dim]{host}[/] → [cyan]{guest}[/]")
-if len(paths) > 5:
-console.print(f" [dim]... and {len(paths) - 5} more paths[/]")
-
-if app_data_paths:
-console.print("\n[bold]📥 Copied paths (one-time import):[/]")
-for idx, (host, guest) in enumerate(list(app_data_paths.items())[:5]):
-console.print(f" [dim]{host}[/] → [cyan]{guest}[/]")
-if len(app_data_paths) > 5:
-console.print(f" [dim]... and {len(app_data_paths) - 5} more paths[/]")
-except PermissionError as e:
-console.print(f"[red]❌ Permission Error:[/]\n{e}")
-console.print("\n[yellow]💡 Try running with --user flag:[/]")
-console.print(f" [cyan]clonebox clone {target_path} --user[/]")
-except Exception as e:
-console.print(f"[red]❌ Error: {e}[/]")
-else:
-console.print("\n[dim]To create VM later, run:[/]")
-console.print(f" [cyan]clonebox start {target_path}[/]")
+console.print("[cyan]🚀 Creating VM from config...[/]")
+config = load_clonebox_config(config_file)
+vm_uuid = create_vm_from_config(
+config, start=True, user_session=args.user, replace=args.replace, approved=args.approve
+)
+console.print(f"[green]✅ VM created: {vm_uuid}[/]")


-def cmd_detect(args):
+def cmd_detect(args) -> None:
 """Detect and show system state."""
-
-
-
-
-
-
-
-
-
+from clonebox.detector import SystemDetector
+
+console.print("[cyan]🔍 Detecting system state...[/]")
+
+try:
+detector = SystemDetector()
+
+# Detect system info
+sys_info = detector.get_system_info()
+
+# Detect all services, apps, and paths
+snapshot = detector.detect_all()
+
+# Detect Docker containers
+containers = detector.detect_docker_containers()
+
+# Prepare output
+output = {
+"system": sys_info,
+"services": [
+{
+"name": s.name,
+"status": s.status,
+"enabled": s.enabled,
+"description": s.description,
+}
+for s in snapshot.running_services
+],
 "applications": [
-{
+{
+"name": a.name,
+"pid": a.pid,
+"memory_mb": round(a.memory_mb, 2),
+"working_dir": a.working_dir or "",
+}
+for a in snapshot.applications
 ],
 "paths": [
-{"path": p.path, "type": p.type, "size_mb": p.size_mb}
+{"path": p.path, "type": p.type, "size_mb": p.size_mb}
+for p in snapshot.paths
+],
+"docker_containers": [
+{
+"name": c["name"],
+"status": c["status"],
+"image": c["image"],
+"ports": c.get("ports", ""),
+}
+for c in containers
 ],
 }
-
-
-
-
-
-
-
+
+# Apply deduplication if requested
+if args.dedupe:
+output["services"] = deduplicate_list(output["services"], key=lambda x: x["name"])
+output["applications"] = deduplicate_list(output["applications"], key=lambda x: (x["name"], x["pid"]))
+output["paths"] = deduplicate_list(output["paths"], key=lambda x: x["path"])
+
+# Format output
+if args.json:
+content = json.dumps(output, indent=2)
+elif args.yaml:
+content = yaml.dump(output, default_flow_style=False, allow_unicode=True)
+else:
+# Pretty print
+content = format_detection_output(output, sys_info)
+
+# Save to file or print
 if args.output:
-
-
-console.print(f"[green]✅
+with open(args.output, "w") as f:
+f.write(content)
+console.print(f"[green]✅ Output saved to: {args.output}[/]")
 else:
-print(
-
-
-
-
-
-
-if running:
-table = Table(title="Running Services", border_style="green")
-table.add_column("Service")
-table.add_column("Status")
-table.add_column("Enabled")
-
-for svc in running:
-table.add_row(svc.name, f"[green]{svc.status}[/]", "✓" if svc.enabled else "")
-
-console.print(table)
-
-# Applications
-apps = detector.detect_applications()
-
-if apps:
-console.print()
-table = Table(title="Running Applications", border_style="blue")
-table.add_column("Name")
-table.add_column("PID")
-table.add_column("Memory")
-table.add_column("Working Dir")
-
-for app in apps[:15]:
-table.add_row(
-app.name,
-str(app.pid),
-f"{app.memory_mb:.0f} MB",
-app.working_dir[:40] if app.working_dir else "",
-)
-
-console.print(table)
-
-# Paths
-paths = detector.detect_paths()
-
-if paths:
-console.print()
-table = Table(title="Detected Paths", border_style="yellow")
-table.add_column("Type")
-table.add_column("Path")
-table.add_column("Size")
+console.print(content)
+
+except Exception as e:
+console.print(f"[red]Error: {e}[/]")
+import traceback
+traceback.print_exc()

-for p in paths[:20]:
-table.add_row(
-f"[cyan]{p.type}[/]", p.path, f"{p.size_mb:.0f} MB" if p.size_mb > 0 else "-"
-)

-
+def format_detection_output(output, sys_info):
+"""Format detection output for console display."""
+from rich.table import Table
+from rich.text import Text
+
+# System info
+system_text = Text()
+system_text.append(f"Hostname: {sys_info['hostname']}\n", style="bold")
+system_text.append(f"User: {sys_info['user']}\n")
+system_text.append(f"CPU: {sys_info['cpu_count']} cores\n")
+system_text.append(
+f"Memory: {sys_info['memory_total_gb']:.1f} GB total, {sys_info['memory_available_gb']:.1f} GB available\n"
+)
+system_text.append(
+f"Disk: {sys_info['disk_total_gb']:.1f} GB total, {sys_info['disk_free_gb']:.1f} GB free"
+)
+
+# Services table
+services_table = Table(title="Services", show_header=True, header_style="bold magenta")
+services_table.add_column("Name", style="cyan")
+services_table.add_column("Status", style="green")
+services_table.add_column("Enabled", style="yellow")
+services_table.add_column("Description", style="dim")
+
+for svc in output["services"]:
+status_style = "green" if svc["status"] == "running" else "red"
+enabled_text = "✓" if svc["enabled"] else "✗"
+services_table.add_row(
+svc["name"],
+Text(svc["status"], style=status_style),
+enabled_text,
+svc["description"] or "-",
+)
+
+# Applications table
+apps_table = Table(title="Applications", show_header=True, header_style="bold magenta")
+apps_table.add_column("Name", style="cyan")
+apps_table.add_column("PID", justify="right")
+apps_table.add_column("Memory (MB)", justify="right")
+apps_table.add_column("Working Dir", style="dim")
+
+for app in output["applications"]:
+apps_table.add_row(
+app["name"],
+str(app["pid"]),
+f"{app['memory_mb']:.1f}",
+app["working_dir"] or "-",
+)
+
+# Combine output
+result = Panel(system_text, title="System Information", border_style="blue")
+result += "\n\n"
+result += services_table
+result += "\n\n"
+result += apps_table
+
+return result


 def cmd_monitor(args) -> None:
-"""Real-time resource monitoring
-
+"""Real-time resource monitoring."""
+from clonebox.cloner import SelectiveVMCloner
+
+user_session = getattr(args, "user", False)
 refresh = getattr(args, "refresh", 2.0)
 once = getattr(args, "once", False)
-
-
-
+
+cloner = SelectiveVMCloner(user_session=user_session)
+
 try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-cpu_color = "red" if vm.cpu_percent > 80 else "green"
-mem_pct = (
-(vm.memory_used_mb / vm.memory_total_mb * 100)
-if vm.memory_total_mb > 0
-else 0
-)
-mem_color = "red" if mem_pct > 80 else "green"
-
+vms = cloner.list_vms()
+
+if not vms:
+console.print("[dim]No VMs found.[/]")
+return
+
+# Create monitor
+monitor = ResourceMonitor(conn_uri="qemu:///session" if user_session else "qemu:///system")
+
+if once:
+# Show stats once
+table = Table(title="VM Resource Usage", show_header=True, header_style="bold magenta")
+table.add_column("VM Name", style="cyan")
+table.add_column("CPU %", justify="right")
+table.add_column("Memory", justify="right")
+table.add_column("Disk I/O", justify="right")
+table.add_column("Network I/O", justify="right")
+
+for vm in vms:
+if vm["state"] == "running":
+stats = monitor.get_vm_stats(vm["name"])
 table.add_row(
-vm
-f"
-
-f"
-f"{
-f"↓{format_bytes(vm.network_rx_bytes)} ↑{format_bytes(vm.network_tx_bytes)}",
+vm["name"],
+f"{stats.get('cpu_percent', 0):.1f}%",
+format_bytes(stats.get("memory_usage", 0)),
+f"{stats.get('disk_read', 0)}/{stats.get('disk_write', 0)} MB/s",
+f"{stats.get('net_rx', 0)}/{stats.get('net_tx', 0)} MB/s",
 )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-cpu_color = "red" if c.cpu_percent > 80 else "green"
-mem_pct = (
-(c.memory_used_mb / c.memory_limit_mb * 100) if c.memory_limit_mb > 0 else 0
-)
-mem_color = "red" if mem_pct > 80 else "green"
-
-table.add_row(
-c.name,
-f"[green]{c.state}[/]",
-f"[{cpu_color}]{c.cpu_percent:.1f}%[/]",
-f"[{mem_color}]{c.memory_used_mb}/{c.memory_limit_mb} MB[/]",
-f"↓{format_bytes(c.network_rx_bytes)} ↑{format_bytes(c.network_tx_bytes)}",
-str(c.pids),
+else:
+table.add_row(vm["name"], "[dim]not running[/]", "-", "-", "-")
+
+console.print(table)
+else:
+# Continuous monitoring
+console.print(f"[cyan]Monitoring VMs (refresh every {refresh}s). Press Ctrl+C to exit.[/]\n")
+
+try:
+while True:
+# Clear screen
+console.clear()
+
+# Create table
+table = Table(
+title=f"VM Resource Usage - {datetime.now().strftime('%H:%M:%S')}",
+show_header=True,
+header_style="bold magenta",
 )
-
-
-
-
-
-
-
-
-
-
-
-
+table.add_column("VM Name", style="cyan")
+table.add_column("State", style="green")
+table.add_column("CPU %", justify="right")
+table.add_column("Memory", justify="right")
+table.add_column("Disk I/O", justify="right")
+table.add_column("Network I/O", justify="right")
+
+for vm in vms:
+if vm["state"] == "running":
+stats = monitor.get_vm_stats(vm["name"])
+table.add_row(
+vm["name"],
+vm["state"],
+f"{stats.get('cpu_percent', 0):.1f}%",
+format_bytes(stats.get("memory_usage", 0)),
+f"{stats.get('disk_read', 0):.1f}/{stats.get('disk_write', 0):.1f} MB/s",
+f"{stats.get('net_rx', 0):.1f}/{stats.get('net_tx', 0):.1f} MB/s",
+)
+else:
+table.add_row(vm["name"], f"[dim]{vm['state']}[/]", "-", "-", "-", "-")
+
+console.print(table)
+time.sleep(refresh)
+
+except KeyboardInterrupt:
+console.print("\n[yellow]Monitoring stopped.[/]")
+
 finally:
 monitor.close()

@@ -3159,6 +3099,17 @@ def cmd_snapshot_restore(args) -> None:
 vm_name, config_file = _resolve_vm_name_and_config_file(args.vm_name)
 conn_uri = "qemu:///session" if getattr(args, "user", False) else "qemu:///system"

+policy = PolicyEngine.load_effective(start=config_file)
+if policy is not None:
+try:
+policy.assert_operation_approved(
+AuditEventType.VM_SNAPSHOT_RESTORE.value,
+approved=getattr(args, "approve", False),
+)
+except PolicyViolationError as e:
+console.print(f"[red]❌ {e}[/]")
+sys.exit(1)
+
 console.print(f"[cyan]🔄 Restoring snapshot: {args.name}[/]")

 try:
@@ -3364,6 +3315,46 @@ def cmd_list_remote(args) -> None:
 console.print("[yellow]No VMs found on remote host.[/]")


+def cmd_policy_validate(args) -> None:
+"""Validate a policy file."""
+try:
+file_arg = getattr(args, "file", None)
+if file_arg:
+policy_path = Path(file_arg).expanduser().resolve()
+else:
+policy_path = PolicyEngine.find_policy_file()
+
+if not policy_path:
+console.print("[red]❌ Policy file not found[/]")
+sys.exit(1)
+
+PolicyEngine.load(policy_path)
+console.print(f"[green]✅ Policy valid: {policy_path}[/]")
+except (PolicyValidationError, FileNotFoundError) as e:
+console.print(f"[red]❌ Policy invalid: {e}[/]")
+sys.exit(1)
+
+
+def cmd_policy_apply(args) -> None:
+"""Apply a policy file as project or global policy."""
+try:
+src = Path(args.file).expanduser().resolve()
+PolicyEngine.load(src)
+
+scope = getattr(args, "scope", "project")
+if scope == "global":
+dest = Path.home() / ".clonebox.d" / "policy.yaml"
+dest.parent.mkdir(parents=True, exist_ok=True)
+else:
+dest = Path.cwd() / ".clonebox-policy.yaml"
+
+dest.write_text(src.read_text())
+console.print(f"[green]✅ Policy applied: {dest}[/]")
+except (PolicyValidationError, FileNotFoundError) as e:
+console.print(f"[red]❌ Failed to apply policy: {e}[/]")
+sys.exit(1)
+
+
 # === Audit Commands ===


@@ -3930,6 +3921,17 @@ def cmd_remote_delete(args) -> None:
 user_session = getattr(args, "user", False)
 keep_storage = getattr(args, "keep_storage", False)

+policy = PolicyEngine.load_effective()
+if policy is not None:
+try:
+policy.assert_operation_approved(
+AuditEventType.VM_DELETE.value,
+approved=getattr(args, "approve", False),
+)
+except PolicyViolationError as e:
+console.print(f"[red]❌ {e}[/]")
+sys.exit(1)
+
 if not getattr(args, "yes", False):
 confirm = questionary.confirm(
 f"Delete VM '{vm_name}' on {host}?",
@@ -4105,6 +4107,11 @@ def main():
 action="store_true",
 help="Use user session (qemu:///session) - no root required",
 )
+delete_parser.add_argument(
+"--approve",
+action="store_true",
+help="Approve policy-gated operation",
+)
 delete_parser.set_defaults(func=cmd_delete)

 # List command
@@ -4274,6 +4281,11 @@ def main():
 action="store_true",
 help="If VM already exists, stop+undefine it and recreate (also deletes its storage)",
 )
+clone_parser.add_argument(
+"--approve",
+action="store_true",
+help="Approve policy-gated operation (required for --replace if policy demands)",
+)
 clone_parser.add_argument(
 "--dry-run",
 action="store_true",
@@ -4413,14 +4425,12 @@ def main():
 export_parser.add_argument(
 "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
 )
-export_parser.add_argument(
-"-o", "--output", help="Output archive filename (default: <vmname>-export.tar.gz)"
-)
+export_parser.add_argument("-o", "--output", help="Output archive filename (default: <vmname>-export.tar.gz)")
 export_parser.add_argument(
 "--include-data",
 "-d",
 action="store_true",
-help="Include shared data (browser profiles, configs)
+help="Include shared data (browser profiles, configs)",
 )
 export_parser.set_defaults(func=cmd_export)

@@ -4433,6 +4443,11 @@ def main():
 import_parser.add_argument(
 "--replace", action="store_true", help="Replace existing VM if exists"
 )
+import_parser.add_argument(
+"--approve",
+action="store_true",
+help="Approve policy-gated operation (required for --replace if policy demands)",
+)
 import_parser.set_defaults(func=cmd_import)

 # Test command - validate VM configuration
@@ -4512,6 +4527,11 @@ def main():
 snap_restore.add_argument(
 "-f", "--force", action="store_true", help="Force restore even if running"
 )
+snap_restore.add_argument(
+"--approve",
+action="store_true",
+help="Approve policy-gated operation",
+)
 snap_restore.set_defaults(func=cmd_snapshot_restore)

 snap_delete = snapshot_sub.add_parser("delete", aliases=["rm"], help="Delete snapshot")
@@ -4700,6 +4720,28 @@ def main():
 plugin_uninstall.add_argument("name", help="Plugin name")
 plugin_uninstall.set_defaults(func=cmd_plugin_uninstall)

+policy_parser = subparsers.add_parser("policy", help="Manage security policies")
+policy_parser.set_defaults(func=lambda args, p=policy_parser: p.print_help())
+policy_sub = policy_parser.add_subparsers(dest="policy_command", help="Policy commands")
+
+policy_validate = policy_sub.add_parser("validate", help="Validate policy file")
+policy_validate.add_argument(
+"--file",
+"-f",
+help="Policy file (default: auto-detect .clonebox-policy.yaml/.yml or ~/.clonebox.d/policy.yaml)",
+)
+policy_validate.set_defaults(func=cmd_policy_validate)
+
+policy_apply = policy_sub.add_parser("apply", help="Apply policy file")
+policy_apply.add_argument("--file", "-f", required=True, help="Policy file to apply")
+policy_apply.add_argument(
+"--scope",
+choices=["project", "global"],
+default="project",
+help="Apply scope: project writes .clonebox-policy.yaml in CWD, global writes ~/.clonebox.d/policy.yaml",
+)
+policy_apply.set_defaults(func=cmd_policy_apply)
+
 # === Remote Management Commands ===
 remote_parser = subparsers.add_parser("remote", help="Manage VMs on remote hosts")
 remote_sub = remote_parser.add_subparsers(dest="remote_command", help="Remote commands")
@@ -4735,6 +4777,11 @@ def main():
 remote_delete.add_argument("-u", "--user", action="store_true", help="Use user session on remote")
 remote_delete.add_argument("-y", "--yes", action="store_true", help="Skip confirmation")
 remote_delete.add_argument("--keep-storage", action="store_true", help="Keep disk images")
+remote_delete.add_argument(
+"--approve",
+action="store_true",
+help="Approve policy-gated operation",
+)
 remote_delete.set_defaults(func=cmd_remote_delete)

 remote_exec = remote_sub.add_parser("exec", help="Execute command in VM on remote host")