clonebox 0.1.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clonebox/__init__.py +14 -0
- clonebox/__main__.py +7 -0
- clonebox/cli.py +2932 -0
- clonebox/cloner.py +2081 -0
- clonebox/container.py +190 -0
- clonebox/dashboard.py +133 -0
- clonebox/detector.py +705 -0
- clonebox/models.py +201 -0
- clonebox/profiles.py +66 -0
- clonebox/templates/profiles/ml-dev.yaml +6 -0
- clonebox/validator.py +841 -0
- clonebox-0.1.25.dist-info/METADATA +1382 -0
- clonebox-0.1.25.dist-info/RECORD +17 -0
- clonebox-0.1.25.dist-info/WHEEL +5 -0
- clonebox-0.1.25.dist-info/entry_points.txt +2 -0
- clonebox-0.1.25.dist-info/licenses/LICENSE +201 -0
- clonebox-0.1.25.dist-info/top_level.txt +1 -0
clonebox/cli.py
ADDED
@@ -0,0 +1,2932 @@
#!/usr/bin/env python3
"""
CloneBox CLI - Interactive command-line interface for creating VMs.
"""

import argparse
import json
import os
import re
import sys
import time
from typing import Any, Dict, Optional, Tuple
from datetime import datetime
from pathlib import Path

import questionary
import yaml
from questionary import Style
from rich.console import Console
from rich.live import Live
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.table import Table

from clonebox import __version__
from clonebox.cloner import SelectiveVMCloner, VMConfig
from clonebox.container import ContainerCloner
from clonebox.detector import SystemDetector
from clonebox.models import ContainerConfig
from clonebox.profiles import merge_with_profile

# Custom questionary style
custom_style = Style(
    [
        ("qmark", "fg:cyan bold"),
        ("question", "bold"),
        ("answer", "fg:green"),
        ("pointer", "fg:cyan bold"),
        ("highlighted", "fg:cyan bold"),
        ("selected", "fg:green"),
        ("separator", "fg:gray"),
        ("instruction", "fg:gray italic"),
    ]
)

console = Console()


def print_banner():
    """Print the CloneBox banner."""
    banner = """
╔═══════════════════════════════════════════════════════════════╗
║ ____ _ ____ ║
║ / ___|| | ___ _ __ ___| _ \\ ___ __ __ ║
║ | | | | / _ \\ | '_ \\ / _ \\ |_) |/ _ \\\\ \\/ / ║
║ | |___ | || (_) || | | | __/ _ <| (_) |> < ║
║ \\____||_| \\___/ |_| |_|\\___|_| \\_\\\\___//_/\\_\\ ║
║ ║
║ Clone your workstation to an isolated VM ║
╚═══════════════════════════════════════════════════════════════╝
    """
    console.print(banner, style="cyan")
    console.print(f" Version {__version__}\n", style="dim")


def _resolve_vm_name_and_config_file(name: Optional[str]) -> Tuple[str, Optional[Path]]:
    config_file: Optional[Path] = None

    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
        if config_file.exists():
            config = load_clonebox_config(config_file)
            return config["vm"]["name"], config_file
        raise FileNotFoundError(f"Config not found: {config_file}")

    if not name:
        config_file = Path.cwd() / ".clonebox.yaml"
        if config_file.exists():
            config = load_clonebox_config(config_file)
            return config["vm"]["name"], config_file
        raise FileNotFoundError("No VM name specified and no .clonebox.yaml found")

    return name, None


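# Resolution rules implemented by _resolve_vm_name_and_config_file above: an
# argument beginning with ".", "/" or "~" is treated as a path to a
# .clonebox.yaml file (or to a directory containing one); an empty argument
# falls back to ./.clonebox.yaml in the current directory; any other string
# is used verbatim as a VM name.

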
def _qga_ping(vm_name: str, conn_uri: str) -> bool:
    import subprocess

    try:
        result = subprocess.run(
            [
                "virsh",
                "--connect",
                conn_uri,
                "qemu-agent-command",
                vm_name,
                json.dumps({"execute": "guest-ping"}),
            ],
            capture_output=True,
            text=True,
            timeout=5,
        )
        return result.returncode == 0
    except Exception:
        return False


def _qga_exec(vm_name: str, conn_uri: str, command: str, timeout: int = 10) -> Optional[str]:
    import subprocess
    import base64
    import time

    try:
        payload = {
            "execute": "guest-exec",
            "arguments": {
                "path": "/bin/sh",
                "arg": ["-c", command],
                "capture-output": True,
            },
        }
        exec_result = subprocess.run(
            [
                "virsh",
                "--connect",
                conn_uri,
                "qemu-agent-command",
                vm_name,
                json.dumps(payload),
            ],
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        if exec_result.returncode != 0:
            return None

        resp = json.loads(exec_result.stdout)
        pid = resp.get("return", {}).get("pid")
        if not pid:
            return None

        deadline = time.time() + timeout
        while time.time() < deadline:
            status_payload = {"execute": "guest-exec-status", "arguments": {"pid": pid}}
            status_result = subprocess.run(
                [
                    "virsh",
                    "--connect",
                    conn_uri,
                    "qemu-agent-command",
                    vm_name,
                    json.dumps(status_payload),
                ],
                capture_output=True,
                text=True,
                timeout=5,
            )
            if status_result.returncode != 0:
                return None

            status_resp = json.loads(status_result.stdout)
            ret = status_resp.get("return", {})
            if not ret.get("exited", False):
                time.sleep(0.3)
                continue

            out_data = ret.get("out-data")
            if out_data:
                return base64.b64decode(out_data).decode().strip()
            return ""

        return None
    except Exception:
        return None


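# For reference, the guest-exec round trip driven by _qga_exec() looks like
# this when done by hand (VM name and output values are illustrative only;
# the JSON shapes are those of the QEMU guest agent protocol):
#
#   $ virsh --connect qemu:///session qemu-agent-command demo-vm \
#       '{"execute":"guest-exec","arguments":{"path":"/bin/sh","arg":["-c","uname -r"],"capture-output":true}}'
#   {"return":{"pid":1234}}
#
#   $ virsh --connect qemu:///session qemu-agent-command demo-vm \
#       '{"execute":"guest-exec-status","arguments":{"pid":1234}}'
#   {"return":{"exited":true,"exitcode":0,"out-data":"Ni4xLjAtZ2VuZXJpYwo="}}
#
# "out-data" is base64-encoded, which is why the helper decodes it before
# returning the command's stdout.

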
def run_vm_diagnostics(
    vm_name: str,
    conn_uri: str,
    config_file: Optional[Path],
    *,
    verbose: bool = False,
    json_output: bool = False,
) -> dict:
    import subprocess

    result: dict = {
        "vm": {"name": vm_name, "conn_uri": conn_uri},
        "state": {},
        "network": {},
        "qga": {},
        "cloud_init": {},
        "mounts": {},
        "health": {},
    }

    console.print(f"[bold cyan]🧪 Diagnostics: {vm_name}[/]\n")

    guest_agent_ready = _qga_ping(vm_name, conn_uri)
    result["qga"]["ready"] = guest_agent_ready

    try:
        domstate = subprocess.run(
            ["virsh", "--connect", conn_uri, "domstate", vm_name],
            capture_output=True,
            text=True,
            timeout=5,
        )
        result["state"] = {
            "returncode": domstate.returncode,
            "stdout": domstate.stdout.strip(),
            "stderr": domstate.stderr.strip(),
        }
        if domstate.returncode == 0 and domstate.stdout.strip():
            console.print(f"[green]✅ VM State: {domstate.stdout.strip()}[/]")
        else:
            console.print("[red]❌ VM State: unable to read[/]")
            if verbose and domstate.stderr.strip():
                console.print(f"[dim]{domstate.stderr.strip()}[/]")
    except subprocess.TimeoutExpired:
        result["state"] = {"error": "timeout"}
        console.print("[red]❌ VM State: timeout[/]")
        if json_output:
            console.print_json(json.dumps(result))
        return result

    console.print("\n[bold]🔍 Checking VM network...[/]")
    try:
        domifaddr = subprocess.run(
            ["virsh", "--connect", conn_uri, "domifaddr", vm_name],
            capture_output=True,
            text=True,
            timeout=10,
        )
        result["network"] = {
            "returncode": domifaddr.returncode,
            "stdout": domifaddr.stdout.strip(),
            "stderr": domifaddr.stderr.strip(),
        }
        if domifaddr.stdout.strip():
            console.print(f"[dim]{domifaddr.stdout.strip()}[/]")
        else:
            console.print("[yellow]⚠️ No interface address detected via virsh domifaddr[/]")
            if verbose and domifaddr.stderr.strip():
                console.print(f"[dim]{domifaddr.stderr.strip()}[/]")
            # Fallback: try to get IP via QEMU Guest Agent (useful for slirp/user networking)
            if guest_agent_ready:
                try:
                    ip_out = _qga_exec(vm_name, conn_uri, "ip -4 -o addr show scope global | awk '{print $4}'", timeout=5)
                    if ip_out and ip_out.strip():
                        console.print(f"[green]IP (via QGA): {ip_out.strip()}[/]")
                        result["network"]["qga_ip"] = ip_out.strip()
                    else:
                        console.print("[dim]IP: not available via QGA[/]")
                except Exception as e:
                    console.print(f"[dim]IP: QGA query failed ({e})[/]")
            else:
                console.print("[dim]IP: QEMU Guest Agent not connected[/]")
    except Exception as e:
        result["network"] = {"error": str(e)}
        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")

    if verbose:
        console.print("\n[bold]🤖 QEMU Guest Agent...[/]")
        console.print(f"{'[green]✅' if guest_agent_ready else '[red]❌'} QGA connected")

    if not guest_agent_ready:
        try:
            dumpxml = subprocess.run(
                ["virsh", "--connect", conn_uri, "dumpxml", vm_name],
                capture_output=True,
                text=True,
                timeout=10,
            )
            has_qga_channel = False
            if dumpxml.returncode == 0:
                has_qga_channel = "org.qemu.guest_agent.0" in dumpxml.stdout
            result["qga"]["dumpxml_returncode"] = dumpxml.returncode
            result["qga"]["has_channel"] = has_qga_channel
            if dumpxml.stderr.strip():
                result["qga"]["dumpxml_stderr"] = dumpxml.stderr.strip()

            console.print(
                f"[dim]Guest agent channel in VM XML: {'present' if has_qga_channel else 'missing'}[/]"
            )
        except Exception as e:
            result["qga"]["dumpxml_error"] = str(e)

        try:
            ping_attempt = subprocess.run(
                [
                    "virsh",
                    "--connect",
                    conn_uri,
                    "qemu-agent-command",
                    vm_name,
                    json.dumps({"execute": "guest-ping"}),
                ],
                capture_output=True,
                text=True,
                timeout=10,
            )
            result["qga"]["ping_returncode"] = ping_attempt.returncode
            result["qga"]["ping_stdout"] = ping_attempt.stdout.strip()
            result["qga"]["ping_stderr"] = ping_attempt.stderr.strip()
            if ping_attempt.stderr.strip():
                console.print(f"[dim]qemu-agent-command stderr: {ping_attempt.stderr.strip()}[/]")
        except Exception as e:
            result["qga"]["ping_error"] = str(e)

        console.print("[dim]If channel is present, the agent inside VM may not be running yet.[/]")
        console.print("[dim]Inside VM try: sudo systemctl status qemu-guest-agent && sudo systemctl restart qemu-guest-agent[/]")

    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
    cloud_init_complete = False
    if not guest_agent_ready:
        result["cloud_init"] = {"status": "unknown", "reason": "qga_not_ready"}
        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU guest agent not connected yet)[/]")
    else:
        ready_msg = _qga_exec(vm_name, conn_uri, "cat /var/log/clonebox-ready 2>/dev/null || true", timeout=10)
        result["cloud_init"]["clonebox_ready_file"] = ready_msg
        if ready_msg and "CloneBox VM ready" in ready_msg:
            cloud_init_complete = True
            result["cloud_init"]["status"] = "complete"
            console.print("[green]✅ Cloud-init: Complete[/]")
        else:
            ci_status = _qga_exec(vm_name, conn_uri, "cloud-init status 2>/dev/null || true", timeout=10)
            result["cloud_init"]["cloud_init_status"] = ci_status
            result["cloud_init"]["status"] = "running"
            console.print("[yellow]⏳ Cloud-init: Still running[/]")
            if verbose and ci_status:
                console.print(f"[dim]{ci_status}[/]")

    console.print("\n[bold]💾 Checking mount status...[/]")
    if not cloud_init_complete:
        console.print("[dim]Mounts may not be ready until cloud-init completes.[/]")

    mounts_detail: list = []
    result["mounts"]["details"] = mounts_detail
    if not guest_agent_ready:
        console.print("[yellow]⏳ QEMU guest agent not connected yet - cannot verify mounts.[/]")
        result["mounts"]["status"] = "unknown"
    else:
        if not config_file:
            config_file = Path.cwd() / ".clonebox.yaml"

        if not config_file.exists():
            console.print("[dim]No .clonebox.yaml found - cannot check mounts[/]")
            result["mounts"]["status"] = "no_config"
        else:
            config = load_clonebox_config(config_file)
            all_paths = config.get("paths", {}).copy()
            all_paths.update(config.get("app_data_paths", {}))
            result["mounts"]["expected"] = list(all_paths.values())
            mount_output = _qga_exec(vm_name, conn_uri, "mount | grep 9p || true", timeout=10) or ""
            mounted_paths = [line.split()[2] for line in mount_output.split("\n") if line.strip()]
            result["mounts"]["mounted_paths"] = mounted_paths

            mount_table = Table(title="Mount Points", border_style="cyan", show_header=True)
            mount_table.add_column("Guest Path", style="bold")
            mount_table.add_column("Mounted", justify="center")
            mount_table.add_column("Accessible", justify="center")
            mount_table.add_column("Files", justify="right")

            working_mounts = 0
            total_mounts = 0
            for _, guest_path in all_paths.items():
                total_mounts += 1
                is_mounted = any(guest_path == mp or guest_path in mp for mp in mounted_paths)
                accessible = False
                file_count: str = "?"

                if is_mounted:
                    test_out = _qga_exec(vm_name, conn_uri, f"test -d {guest_path} && echo yes || echo no", timeout=5)
                    accessible = test_out == "yes"
                    if accessible:
                        count_str = _qga_exec(vm_name, conn_uri, f"ls -A {guest_path} 2>/dev/null | wc -l", timeout=5)
                        if count_str and count_str.strip().isdigit():
                            file_count = count_str.strip()

                if is_mounted and accessible:
                    working_mounts += 1

                mount_table.add_row(
                    guest_path,
                    "[green]✅[/]" if is_mounted else "[red]❌[/]",
                    "[green]✅[/]" if accessible else ("[red]❌[/]" if is_mounted else "[dim]N/A[/]"),
                    file_count,
                )
                mounts_detail.append(
                    {
                        "guest_path": guest_path,
                        "mounted": is_mounted,
                        "accessible": accessible,
                        "files": file_count,
                    }
                )

            result["mounts"]["working"] = working_mounts
            result["mounts"]["total"] = total_mounts
            result["mounts"]["status"] = "ok" if working_mounts == total_mounts else "partial"

            console.print(mount_table)
            console.print(f"[dim]{working_mounts}/{total_mounts} mounts working[/]")

    console.print("\n[bold]🏥 Health Check Status...[/]")
    if not guest_agent_ready:
        result["health"]["status"] = "unknown"
        console.print("[dim]Health status: Not available yet (QEMU guest agent not ready)[/]")
    else:
        health_status = _qga_exec(vm_name, conn_uri, "cat /var/log/clonebox-health-status 2>/dev/null || true", timeout=10)
        result["health"]["raw"] = health_status
        if health_status and "HEALTH_STATUS=OK" in health_status:
            result["health"]["status"] = "ok"
            console.print("[green]✅ Health: All checks passed[/]")
        elif health_status and "HEALTH_STATUS=FAILED" in health_status:
            result["health"]["status"] = "failed"
            console.print("[red]❌ Health: Some checks failed[/]")
        else:
            result["health"]["status"] = "not_run"
            console.print("[yellow]⏳ Health check not yet run[/]")
            if verbose and health_status:
                console.print(f"[dim]{health_status}[/]")

    if json_output:
        console.print_json(json.dumps(result))
    return result


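# Shape of the dict returned by run_vm_diagnostics() above, with hypothetical
# values (handy when parsing the JSON emitted with json_output=True):
#
#   {
#     "vm": {"name": "clonebox-vm", "conn_uri": "qemu:///session"},
#     "state": {"returncode": 0, "stdout": "running", "stderr": ""},
#     "network": {"returncode": 0, "stdout": "...", "stderr": "", "qga_ip": "10.0.2.15/24"},
#     "qga": {"ready": true},
#     "cloud_init": {"clonebox_ready_file": "CloneBox VM ready", "status": "complete"},
#     "mounts": {"details": [...], "expected": [...], "mounted_paths": [...],
#                "working": 2, "total": 2, "status": "ok"},
#     "health": {"raw": "HEALTH_STATUS=OK", "status": "ok"}
#   }

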
def cmd_watch(args):
    name = args.name
    user_session = getattr(args, "user", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"
    refresh = getattr(args, "refresh", 1.0)
    max_wait = getattr(args, "timeout", 600)

    try:
        vm_name, _ = _resolve_vm_name_and_config_file(name)
    except FileNotFoundError as e:
        console.print(f"[red]❌ {e}[/]")
        return

    console.print(f"[bold cyan]👀 Watching boot diagnostics: {vm_name}[/]")
    console.print("[dim]Waiting for QEMU Guest Agent...[/]")

    start = time.time()
    while time.time() - start < max_wait:
        if _qga_ping(vm_name, conn_uri):
            break
        time.sleep(min(refresh, 2.0))

    if not _qga_ping(vm_name, conn_uri):
        console.print("[yellow]⚠️ QEMU Guest Agent not connected - cannot watch diagnostic status yet[/]")
        console.print(f"[dim]Try: clonebox status {name or vm_name} {'--user' if user_session else ''} --verbose[/]")
        return

    def _read_status() -> Tuple[Optional[Dict[str, Any]], str]:
        status_raw = _qga_exec(vm_name, conn_uri, "cat /var/run/clonebox-status.json 2>/dev/null || true", timeout=10)
        log_tail = _qga_exec(vm_name, conn_uri, "tail -n 40 /var/log/clonebox-boot.log 2>/dev/null || true", timeout=10) or ""

        status_obj: Optional[Dict[str, Any]] = None
        if status_raw:
            try:
                status_obj = json.loads(status_raw)
            except Exception:
                status_obj = None
        return status_obj, log_tail

    with Live(refresh_per_second=max(1, int(1 / max(refresh, 0.2))), console=console) as live:
        while True:
            status_obj, log_tail = _read_status()
            phase = (status_obj or {}).get("phase") if status_obj else None
            current_task = (status_obj or {}).get("current_task") if status_obj else None

            header = f"phase={phase or 'unknown'}"
            if current_task:
                header += f" | {current_task}"

            stats = ""
            if status_obj:
                stats = (
                    f"passed={status_obj.get('passed', 0)} failed={status_obj.get('failed', 0)} repaired={status_obj.get('repaired', 0)} total={status_obj.get('total', 0)}"
                )

            body = "\n".join([s for s in [header, stats, "", log_tail.strip()] if s])
            live.update(Panel(body or "(no output yet)", title="CloneBox boot diagnostic", border_style="cyan"))

            if phase == "complete":
                break

            if time.time() - start >= max_wait:
                break

            time.sleep(refresh)


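# cmd_watch polls two files inside the guest via QGA: /var/run/clonebox-status.json
# and /var/log/clonebox-boot.log. Judging only from the fields read above, the
# status file is expected to look roughly like this (illustrative values):
#
#   {"phase": "repair", "current_task": "remount /mnt/host0",
#    "passed": 3, "failed": 1, "repaired": 1, "total": 5}
#
# The loop exits when "phase" reaches "complete" or the timeout elapses.

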
def cmd_repair(args):
    name = args.name
    user_session = getattr(args, "user", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"
    timeout = getattr(args, "timeout", 600)
    follow = getattr(args, "watch", False)

    try:
        vm_name, _ = _resolve_vm_name_and_config_file(name)
    except FileNotFoundError as e:
        console.print(f"[red]❌ {e}[/]")
        return

    if not _qga_ping(vm_name, conn_uri):
        console.print("[yellow]⚠️ QEMU Guest Agent not connected - cannot trigger repair[/]")
        console.print("[dim]Inside VM you can run: sudo /usr/local/bin/clonebox-boot-diagnostic[/]")
        return

    console.print(f"[cyan]🔧 Running boot diagnostic/repair in VM: {vm_name}[/]")
    out = _qga_exec(vm_name, conn_uri, "/usr/local/bin/clonebox-boot-diagnostic || true", timeout=timeout)
    if out is None:
        console.print("[yellow]⚠️ Repair triggered but output not available via QGA (check VM console/log)[/]")
    elif out.strip():
        console.print(Panel(out.strip()[-3000:], title="Command output", border_style="cyan"))

    if follow:
        cmd_watch(
            argparse.Namespace(
                name=name,
                user=user_session,
                refresh=getattr(args, "refresh", 1.0),
                timeout=timeout,
            )
        )


def interactive_mode():
    """Run the interactive VM creation wizard."""
    print_banner()

    console.print("[bold cyan]🔍 Detecting system state...[/]\n")

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    ) as progress:
        task = progress.add_task("Scanning services, apps, and paths...", total=None)
        detector = SystemDetector()
        snapshot = detector.detect_all()
        sys_info = detector.get_system_info()
        docker_containers = detector.detect_docker_containers()

    # Show system info
    console.print(
        Panel(
            f"[bold]Hostname:[/] {sys_info['hostname']}\n"
            f"[bold]User:[/] {sys_info['user']}\n"
            f"[bold]CPU:[/] {sys_info['cpu_count']} cores\n"
            f"[bold]RAM:[/] {sys_info['memory_available_gb']:.1f} / {sys_info['memory_total_gb']:.1f} GB available\n"
            f"[bold]Disk:[/] {sys_info['disk_free_gb']:.1f} / {sys_info['disk_total_gb']:.1f} GB free",
            title="[bold cyan]System Info[/]",
            border_style="cyan",
        )
    )

    console.print()

    # === VM Name ===
    vm_name = questionary.text("VM name:", default="clonebox-vm", style=custom_style).ask()

    if not vm_name:
        console.print("[red]Cancelled.[/]")
        return

    # === RAM ===
    max_ram = int(sys_info["memory_available_gb"] * 1024 * 0.75)  # 75% of available
    default_ram = min(4096, max_ram)

    ram_mb = questionary.text(
        f"RAM (MB) [max recommended: {max_ram}]:", default=str(default_ram), style=custom_style
    ).ask()
    ram_mb = int(ram_mb) if ram_mb else default_ram

    # === vCPUs ===
    max_vcpus = sys_info["cpu_count"]
    default_vcpus = max(2, max_vcpus // 2)

    vcpus = questionary.text(
        f"vCPUs [max: {max_vcpus}]:", default=str(default_vcpus), style=custom_style
    ).ask()
    vcpus = int(vcpus) if vcpus else default_vcpus

    # === Services Selection ===
    console.print("\n[bold cyan]📦 Select services to include in VM:[/]")

    service_choices = []
    for svc in snapshot.running_services:
        label = f"{svc.name} ({svc.status})"
        if svc.description:
            label += f" - {svc.description[:40]}"
        service_choices.append(questionary.Choice(label, value=svc.name))

    selected_services = []
    if service_choices:
        selected_services = (
            questionary.checkbox(
                "Services (space to select, enter to confirm):",
                choices=service_choices,
                style=custom_style,
            ).ask()
            or []
        )
    else:
        console.print("[dim] No interesting services detected[/]")

    # === Applications/Processes Selection ===
    console.print("\n[bold cyan]🚀 Select applications to track:[/]")

    app_choices = []
    for app in snapshot.running_apps[:20]:  # Limit to top 20
        label = f"{app.name} (PID: {app.pid}, {app.memory_mb:.0f} MB)"
        if app.working_dir:
            label += f" @ {app.working_dir[:30]}"
        app_choices.append(questionary.Choice(label, value=app))

    selected_apps = []
    if app_choices:
        selected_apps = (
            questionary.checkbox(
                "Applications (will add their working dirs):",
                choices=app_choices,
                style=custom_style,
            ).ask()
            or []
        )
    else:
        console.print("[dim] No interesting applications detected[/]")

    # === Docker Containers ===
    if docker_containers:
        console.print("\n[bold cyan]🐳 Docker containers detected:[/]")

        container_choices = [
            questionary.Choice(f"{c['name']} ({c['image']}) - {c['status']}", value=c["name"])
            for c in docker_containers
        ]

        selected_containers = (
            questionary.checkbox(
                "Containers (will share docker socket):",
                choices=container_choices,
                style=custom_style,
            ).ask()
            or []
        )

        # If any docker selected, add docker socket
        if selected_containers:
            if "docker" not in selected_services:
                selected_services.append("docker")

    # === Paths Selection ===
    console.print("\n[bold cyan]📁 Select paths to mount in VM:[/]")

    # Group paths by type
    path_groups = {}
    for p in snapshot.paths:
        if p.type not in path_groups:
            path_groups[p.type] = []
        path_groups[p.type].append(p)

    path_choices = []
    for ptype in ["project", "config", "data"]:
        if ptype in path_groups:
            for p in path_groups[ptype]:
                size_str = f"{p.size_mb:.0f} MB" if p.size_mb > 0 else "?"
                label = f"[{ptype}] {p.path} ({size_str})"
                if p.description:
                    label += f" - {p.description}"
                path_choices.append(questionary.Choice(label, value=p.path))

    selected_paths = []
    if path_choices:
        selected_paths = (
            questionary.checkbox(
                "Paths (will be bind-mounted read-write):", choices=path_choices, style=custom_style
            ).ask()
            or []
        )

    # Add working directories from selected applications
    for app in selected_apps:
        if app.working_dir and app.working_dir not in selected_paths:
            selected_paths.append(app.working_dir)

    # === Additional Packages ===
    console.print("\n[bold cyan]📦 Additional packages to install:[/]")

    common_packages = [
        "build-essential",
        "git",
        "curl",
        "wget",
        "vim",
        "htop",
        "python3",
        "python3-pip",
        "python3-venv",
        "nodejs",
        "npm",
        "docker.io",
        "docker-compose",
        "nginx",
        "postgresql",
        "redis",
    ]

    pkg_choices = [questionary.Choice(pkg, value=pkg) for pkg in common_packages]

    selected_packages = (
        questionary.checkbox(
            "Packages (space to select):", choices=pkg_choices, style=custom_style
        ).ask()
        or []
    )

    # Add custom packages
    custom_pkgs = questionary.text(
        "Additional packages (space-separated):", default="", style=custom_style
    ).ask()

    if custom_pkgs:
        selected_packages.extend(custom_pkgs.split())

    # === Base Image ===
    base_image = questionary.text(
        "Base image path (optional, leave empty for blank disk):", default="", style=custom_style
    ).ask()

    # === GUI ===
    enable_gui = questionary.confirm(
        "Enable SPICE graphics (GUI)?", default=True, style=custom_style
    ).ask()

    # === Summary ===
    console.print("\n")

    # Build paths mapping
    paths_mapping = {}
    for idx, host_path in enumerate(selected_paths):
        guest_path = f"/mnt/host{idx}"
        paths_mapping[host_path] = guest_path

    # Summary table
    summary_table = Table(title="VM Configuration Summary", border_style="cyan")
    summary_table.add_column("Setting", style="bold")
    summary_table.add_column("Value")

    summary_table.add_row("Name", vm_name)
    summary_table.add_row("RAM", f"{ram_mb} MB")
    summary_table.add_row("vCPUs", str(vcpus))
    summary_table.add_row("Disk", f"{20 if enable_gui else 10} GB")
    summary_table.add_row("Services", ", ".join(selected_services) or "None")
    summary_table.add_row(
        "Packages",
        ", ".join(selected_packages[:5]) + ("..." if len(selected_packages) > 5 else "") or "None",
    )
    summary_table.add_row("Paths", f"{len(paths_mapping)} bind mounts")
    summary_table.add_row("GUI", "Yes (SPICE)" if enable_gui else "No")

    console.print(summary_table)

    if paths_mapping:
        console.print("\n[bold]Bind mounts:[/]")
        for host, guest in paths_mapping.items():
            console.print(f" [cyan]{host}[/] → [green]{guest}[/]")

    console.print()

    # === Confirm ===
    if not questionary.confirm(
        "Create VM with these settings?", default=True, style=custom_style
    ).ask():
        console.print("[yellow]Cancelled.[/]")
        return

    # === Create VM ===
    console.print("\n[bold cyan]🔧 Creating VM...[/]\n")

    config = VMConfig(
        name=vm_name,
        ram_mb=ram_mb,
        vcpus=vcpus,
        disk_size_gb=20 if enable_gui else 10,
        gui=enable_gui,
        base_image=base_image if base_image else None,
        paths=paths_mapping,
        packages=selected_packages,
        services=selected_services,
    )

    try:
        # NOTE: the published code passes user_session=user_session here, but
        # user_session is never defined in this function (a latent NameError);
        # falling back to the constructor default instead.
        cloner = SelectiveVMCloner()

        # Check prerequisites
        checks = cloner.check_prerequisites()
        if not all(checks.values()):
            console.print("[yellow]⚠️ Prerequisites check:[/]")
            for check, passed in checks.items():
                icon = "✅" if passed else "❌"
                console.print(f" {icon} {check}")

            if not checks["libvirt_connected"]:
                console.print("\n[red]Cannot proceed without libvirt connection.[/]")
                console.print("Try: [cyan]sudo systemctl start libvirtd[/]")
                return

        vm_uuid = cloner.create_vm(config, console=console)

        # Ask to start
        if questionary.confirm("Start VM now?", default=True, style=custom_style).ask():
            cloner.start_vm(vm_name, open_viewer=enable_gui, console=console)
            console.print("\n[bold green]🎉 VM is running![/]")
            console.print(f"\n[dim]UUID: {vm_uuid}[/]")

            if paths_mapping:
                console.print("\n[bold]Inside the VM, mount shared folders with:[/]")
                for idx, (host, guest) in enumerate(paths_mapping.items()):
                    console.print(f" [cyan]sudo mount -t 9p -o trans=virtio mount{idx} {guest}[/]")
    except Exception as e:
        console.print(f"\n[red]❌ Error: {e}[/]")
        raise


def cmd_create(args):
    """Create VM from JSON config."""
    config_data = json.loads(args.config)

    config = VMConfig(
        name=args.name,
        ram_mb=args.ram,
        vcpus=args.vcpus,
        disk_size_gb=getattr(args, "disk_size_gb", 10),
        gui=not args.no_gui,
        base_image=args.base_image,
        paths=config_data.get("paths", {}),
        packages=config_data.get("packages", []),
        services=config_data.get("services", []),
    )

    cloner = SelectiveVMCloner()
    vm_uuid = cloner.create_vm(config, console=console)

    if args.start:
        cloner.start_vm(args.name, open_viewer=not args.no_gui, console=console)

    console.print(f"[green]✅ VM created: {vm_uuid}[/]")


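# A minimal sketch of invoking cmd_create from the shell, assuming flag
# spellings that match the args attributes read above (the actual argparse
# definitions live further down in this file and are not shown in this
# excerpt):
#
#   clonebox create --name dev-vm --ram 4096 --vcpus 2 --start \
#     --config '{"paths": {"/home/me/project": "/mnt/host0"},
#                "packages": ["git", "python3"],
#                "services": ["docker"]}'
#
# Only the "paths", "packages" and "services" keys of --config are read.

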
def cmd_start(args):
    """Start a VM or create from .clonebox.yaml."""
    name = args.name

    # Check if it's a path (contains / or . or ~)
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        # Treat as path - load .clonebox.yaml
        target_path = Path(name).expanduser().resolve()

        if target_path.is_dir():
            config_file = target_path / CLONEBOX_CONFIG_FILE
        else:
            config_file = target_path

        if not config_file.exists():
            console.print(f"[red]❌ Config not found: {config_file}[/]")
            console.print(f"[dim]Run 'clonebox clone {target_path}' first to generate config[/]")
            return

        console.print(f"[bold cyan]📦 Loading config: {config_file}[/]\n")

        config = load_clonebox_config(config_file)
        vm_name = config["vm"]["name"]

        # Check if VM already exists
        cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
        try:
            existing_vms = [v["name"] for v in cloner.list_vms()]
            if vm_name in existing_vms:
                console.print(f"[cyan]VM '{vm_name}' exists, starting...[/]")
                cloner.start_vm(vm_name, open_viewer=not args.no_viewer, console=console)
                return
        except Exception:  # published code used a bare "except:"; narrowed here
            pass

        # Create new VM from config
        console.print(f"[cyan]Creating VM '{vm_name}' from config...[/]\n")
        vm_uuid = create_vm_from_config(config, start=True, user_session=getattr(args, "user", False))
        console.print(f"\n[bold green]🎉 VM '{vm_name}' is running![/]")
        console.print(f"[dim]UUID: {vm_uuid}[/]")

        if config.get("paths"):
            console.print("\n[bold]Inside VM, mount paths with:[/]")
            for idx, (host, guest) in enumerate(config["paths"].items()):
                console.print(f" [cyan]sudo mount -t 9p -o trans=virtio mount{idx} {guest}[/]")
        return

    # Default: treat as VM name
    if not name:
        # No argument - check current directory for .clonebox.yaml
        config_file = Path.cwd() / CLONEBOX_CONFIG_FILE
        if config_file.exists():
            console.print(f"[cyan]Found {CLONEBOX_CONFIG_FILE} in current directory[/]")
            args.name = "."
            return cmd_start(args)
        else:
            console.print(
                "[red]❌ No VM name specified and no .clonebox.yaml in current directory[/]"
            )
            console.print("[dim]Usage: clonebox start <vm-name> or clonebox start .[/]")
            return

    cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
    open_viewer = getattr(args, "viewer", False) or not getattr(args, "no_viewer", False)
    cloner.start_vm(name, open_viewer=open_viewer, console=console)


def cmd_open(args):
    """Open VM viewer window."""
    import subprocess

    name = args.name
    user_session = getattr(args, "user", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"

    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
        if config_file.exists():
            config = load_clonebox_config(config_file)
            name = config["vm"]["name"]
        else:
            console.print(f"[red]❌ Config not found: {config_file}[/]")
            return
    elif name == "." or not name:
        config_file = Path.cwd() / ".clonebox.yaml"
        if config_file.exists():
            config = load_clonebox_config(config_file)
            name = config["vm"]["name"]
        else:
            console.print("[red]❌ No VM name specified and no .clonebox.yaml in current directory[/]")
            console.print("[dim]Usage: clonebox open <vm-name> or clonebox open .[/]")
            return

    # Check if VM is running
    try:
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "domstate", name],
            capture_output=True, text=True, timeout=10
        )
        state = result.stdout.strip()

        if state != "running":
            console.print(f"[yellow]⚠️ VM '{name}' is not running (state: {state})[/]")
            if questionary.confirm(
                f"Start VM '{name}' and open viewer?", default=True, style=custom_style
            ).ask():
                cloner = SelectiveVMCloner(user_session=user_session)
                cloner.start_vm(name, open_viewer=True, console=console)
            else:
                console.print("[dim]Use 'clonebox start' to start the VM first.[/]")
            return
    except Exception as e:
        console.print(f"[red]❌ Error checking VM state: {e}[/]")
        return

    # Open virt-viewer
    console.print(f"[cyan]Opening viewer for VM: {name}[/]")
    try:
        subprocess.run(
            ["virt-viewer", "--connect", conn_uri, name],
            check=True
        )
    except FileNotFoundError:
        console.print("[red]❌ virt-viewer not found[/]")
        console.print("Install with: sudo apt install virt-viewer")
    except subprocess.CalledProcessError as e:
        console.print(f"[red]❌ Failed to open viewer: {e}[/]")


def cmd_stop(args):
    """Stop a VM."""
    name = args.name

    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
        if config_file.exists():
            config = load_clonebox_config(config_file)
            name = config["vm"]["name"]
        else:
            console.print(f"[red]❌ Config not found: {config_file}[/]")
            return

    cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
    cloner.stop_vm(name, force=args.force, console=console)


def cmd_restart(args):
    """Restart a VM (stop and start)."""
    name = args.name

    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
        if config_file.exists():
            config = load_clonebox_config(config_file)
            name = config["vm"]["name"]
        else:
            console.print(f"[red]❌ Config not found: {config_file}[/]")
            return

    cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))

    # Stop the VM
    console.print("[bold yellow]🔄 Stopping VM...[/]")
    cloner.stop_vm(name, force=args.force, console=console)

    # Wait a moment
    time.sleep(2)

    # Start the VM
    console.print("[bold green]🚀 Starting VM...[/]")
    cloner.start_vm(name, wait_for_agent=True, console=console)

    console.print("[bold green]✅ VM restarted successfully![/]")
    if getattr(args, "open", False):
        cloner.open_gui(name, console=console)


def cmd_delete(args):
    """Delete a VM."""
    name = args.name

    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
        if config_file.exists():
            config = load_clonebox_config(config_file)
            name = config["vm"]["name"]
        else:
            console.print(f"[red]❌ Config not found: {config_file}[/]")
            return

    if not args.yes:
        if not questionary.confirm(
            f"Delete VM '{name}' and its storage?", default=False, style=custom_style
        ).ask():
            console.print("[yellow]Cancelled.[/]")
            return


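# Note: as published, cmd_delete ends after the confirmation prompt; the call
# that actually undefines the domain and removes its storage does not appear
# in this diff. A sketch of the missing step (delete_vm is a hypothetical
# method name, not confirmed by this code):
#
#   cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
#   cloner.delete_vm(name, remove_storage=True, console=console)

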
def cmd_list(args):
    """List all VMs."""
    cloner = SelectiveVMCloner(user_session=getattr(args, "user", False))
    vms = cloner.list_vms()

    if getattr(args, "json", False):
        print(json.dumps(vms, indent=2))
        return

    if not vms:
        console.print("[dim]No VMs found.[/]")
        return

    table = Table(title="Virtual Machines", border_style="cyan")
    table.add_column("Name", style="bold")
    table.add_column("State")
    table.add_column("UUID", style="dim")

    for vm in vms:
        state_style = "green" if vm["state"] == "running" else "dim"
        table.add_row(vm["name"], f"[{state_style}]{vm['state']}[/]", vm["uuid"][:8])

    console.print(table)


def cmd_container_up(args):
    """Start a container sandbox."""
    mounts = {}
    for m in getattr(args, "mount", []) or []:
        if ":" not in m:
            raise ValueError(f"Invalid mount: {m} (expected HOST:CONTAINER)")
        host, container_path = m.split(":", 1)
        mounts[host] = container_path

    cfg_kwargs: dict = {
        "engine": getattr(args, "engine", "auto"),
        "image": getattr(args, "image", "ubuntu:22.04"),
        "workspace": Path(getattr(args, "path", ".")),
        "extra_mounts": mounts,
        "env_from_dotenv": not getattr(args, "no_dotenv", False),
        "packages": getattr(args, "package", []) or [],
        "ports": getattr(args, "port", []) or [],
    }
    if getattr(args, "name", None):
        cfg_kwargs["name"] = args.name

    profile_name = getattr(args, "profile", None)
    if profile_name:
        merged = merge_with_profile({"container": cfg_kwargs}, profile_name)
        if isinstance(merged, dict) and isinstance(merged.get("container"), dict):
            cfg_kwargs = merged["container"]

    cfg = ContainerConfig(**cfg_kwargs)

    cloner = ContainerCloner(engine=cfg.engine)
    detach = getattr(args, "detach", False)
    cloner.up(cfg, detach=detach, remove=not detach)


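# A minimal usage sketch for the container path, assuming subcommand and flag
# spellings that mirror the attribute names read above (not confirmed by this
# excerpt):
#
#   clonebox container up --image ubuntu:22.04 --path . \
#     --mount /home/me/data:/data --package git --port 8080:80 --detach
#
# Each --mount must be HOST:CONTAINER. With --detach the container is kept
# after exit (remove=not detach above); otherwise it is removed.

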
def cmd_container_ps(args):
    """List containers."""
    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
    items = cloner.ps(all=getattr(args, "all", False))

    if getattr(args, "json", False):
        print(json.dumps(items, indent=2))
        return

    if not items:
        console.print("[dim]No containers found.[/]")
        return

    table = Table(title="Containers", border_style="cyan")
    table.add_column("Name", style="bold")
    table.add_column("Image")
    table.add_column("Status")
    table.add_column("Ports")

    for c in items:
        table.add_row(
            str(c.get("name", "")),
            str(c.get("image", "")),
            str(c.get("status", "")),
            str(c.get("ports", "")),
        )

    console.print(table)


def cmd_container_stop(args):
    """Stop a container."""
    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
    cloner.stop(args.name)


def cmd_container_rm(args):
    """Remove a container."""
    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
    cloner.rm(args.name, force=getattr(args, "force", False))


def cmd_container_down(args):
    """Stop and remove a container."""
    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
    cloner.stop(args.name)
    cloner.rm(args.name, force=True)


def cmd_dashboard(args):
    """Run the local CloneBox dashboard."""
    try:
        from clonebox.dashboard import run_dashboard
    except Exception as e:
        console.print("[red]❌ Dashboard dependencies are not installed.[/]")
        console.print("[dim]Install with: pip install 'clonebox[dashboard]'[/]")
        console.print(f"[dim]{e}[/]")
        return

    run_dashboard(port=getattr(args, "port", 8080))


def cmd_diagnose(args):
    """Run detailed VM diagnostics (standalone)."""
    name = args.name
    user_session = getattr(args, "user", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"

    try:
        vm_name, config_file = _resolve_vm_name_and_config_file(name)
    except FileNotFoundError as e:
        console.print(f"[red]❌ {e}[/]")
        return

    run_vm_diagnostics(
        vm_name,
        conn_uri,
        config_file,
        verbose=getattr(args, "verbose", False),
        json_output=getattr(args, "json", False),
    )


def cmd_status(args):
    """Check VM installation status and health from workstation."""
    import subprocess

    name = args.name
    user_session = getattr(args, "user", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"

    try:
        vm_name, config_file = _resolve_vm_name_and_config_file(name)
    except FileNotFoundError as e:
        console.print(f"[red]❌ {e}[/]")
        return

    run_vm_diagnostics(vm_name, conn_uri, config_file, verbose=False, json_output=False)

    # Show useful commands
    console.print("\n[bold]📋 Useful commands:[/]")
    console.print(f" [cyan]virt-viewer --connect {conn_uri} {vm_name}[/]  # Open GUI")
    console.print(f" [cyan]virsh --connect {conn_uri} console {vm_name}[/]  # Console access")
    console.print(" [dim]Inside VM:[/]")
    console.print(" [cyan]cat /var/log/clonebox-health.log[/]  # Full health report")
    console.print(" [cyan]sudo cloud-init status[/]  # Cloud-init status")
    console.print(" [cyan]clonebox-health[/]  # Re-run health check")
    console.print(" [dim]On host:[/]")
    console.print(" [cyan]clonebox test . --user --validate[/]  # Full validation (mounts/packages/services)")

    # Run full health check if requested
    if getattr(args, "health", False):
        console.print("\n[bold]🔄 Running full health check...[/]")
        try:
            result = subprocess.run(
                ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
                 '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}'],
                capture_output=True, text=True, timeout=60
            )
            console.print("[green]Health check triggered. View results with:[/]")
            console.print(f" [cyan]virsh --connect {conn_uri} console {vm_name}[/]")
            console.print(" Then run: [cyan]cat /var/log/clonebox-health.log[/]")
        except Exception as e:
            console.print(f"[yellow]⚠️ Could not trigger health check: {e}[/]")


def cmd_export(args):
|
|
1252
|
+
"""Export VM and data for migration to another workstation."""
|
|
1253
|
+
import subprocess
|
|
1254
|
+
import tarfile
|
|
1255
|
+
import shutil
|
|
1256
|
+
|
|
1257
|
+
name = args.name
|
|
1258
|
+
user_session = getattr(args, "user", False)
|
|
1259
|
+
conn_uri = "qemu:///session" if user_session else "qemu:///system"
|
|
1260
|
+
include_data = getattr(args, "include_data", False)
|
|
1261
|
+
output = getattr(args, "output", None)
|
|
1262
|
+
|
|
1263
|
+
# If name is a path, load config
|
|
1264
|
+
if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
|
|
1265
|
+
target_path = Path(name).expanduser().resolve()
|
|
1266
|
+
config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
|
|
1267
|
+
if config_file.exists():
|
|
1268
|
+
config = load_clonebox_config(config_file)
|
|
1269
|
+
name = config["vm"]["name"]
|
|
1270
|
+
else:
|
|
1271
|
+
console.print(f"[red]❌ Config not found: {config_file}[/]")
|
|
1272
|
+
return
|
|
1273
|
+
elif not name or name == ".":
|
|
1274
|
+
config_file = Path.cwd() / ".clonebox.yaml"
|
|
    if config_file.exists():
        config = load_clonebox_config(config_file)
        name = config["vm"]["name"]
    else:
        console.print("[red]❌ No .clonebox.yaml found in current directory[/]")
        console.print("[dim]Usage: clonebox export . or clonebox export <vm-name>[/]")
        return

    console.print(f"[bold cyan]📦 Exporting VM: {name}[/]\n")

    # Get actual disk location from virsh
    try:
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "domblklist", name, "--details"],
            capture_output=True, text=True, timeout=10
        )
        if result.returncode != 0:
            console.print(f"[red]❌ VM '{name}' not found[/]")
            return

        # Parse disk paths from output
        disk_path = None
        cloudinit_path = None
        for line in result.stdout.split('\n'):
            if 'disk' in line and '.qcow2' in line:
                parts = line.split()
                if len(parts) >= 4:
                    disk_path = Path(parts[3])
            elif 'cdrom' in line or '.iso' in line:
                parts = line.split()
                if len(parts) >= 4:
                    cloudinit_path = Path(parts[3])

        if not disk_path or not disk_path.exists():
            console.print("[red]❌ VM disk not found[/]")
            return

        console.print(f"[dim]Disk location: {disk_path}[/]")

    except Exception as e:
        console.print(f"[red]❌ Error getting VM disk: {e}[/]")
        return

    # Create export directory
    export_name = output or f"{name}-export.tar.gz"
    if not export_name.endswith(".tar.gz"):
        export_name += ".tar.gz"

    export_path = Path(export_name).resolve()
    temp_dir = Path(f"/tmp/clonebox-export-{name}")

    try:
        # Clean up temp dir if exists
        if temp_dir.exists():
            shutil.rmtree(temp_dir)
        temp_dir.mkdir(parents=True)

        # Stop VM if running
        console.print("[cyan]Stopping VM for export...[/]")
        subprocess.run(
            ["virsh", "--connect", conn_uri, "shutdown", name],
            capture_output=True, timeout=30
        )
        import time
        time.sleep(5)
        subprocess.run(
            ["virsh", "--connect", conn_uri, "destroy", name],
            capture_output=True, timeout=10
        )

        # Export VM XML
        console.print("[cyan]Exporting VM definition...[/]")
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "dumpxml", name],
            capture_output=True, text=True, timeout=30
        )
        (temp_dir / "vm.xml").write_text(result.stdout)

        # Copy disk image
        console.print("[cyan]Copying disk image (this may take a while)...[/]")
        if disk_path and disk_path.exists():
            shutil.copy2(disk_path, temp_dir / "disk.qcow2")
            console.print(f"[green]✅ Disk copied: {disk_path.stat().st_size / (1024**3):.2f} GB[/]")
        else:
            console.print("[yellow]⚠️ Disk image not found[/]")

        # Copy cloud-init ISO
        if cloudinit_path and cloudinit_path.exists():
            shutil.copy2(cloudinit_path, temp_dir / "cloud-init.iso")
            console.print("[green]✅ Cloud-init ISO copied[/]")

        # Copy config file
        config_file = Path.cwd() / ".clonebox.yaml"
        if config_file.exists():
            shutil.copy2(config_file, temp_dir / ".clonebox.yaml")

        # Copy .env file (note: it may contain sensitive data)
        env_file = Path.cwd() / ".env"
        if env_file.exists():
            shutil.copy2(env_file, temp_dir / ".env")

        # Include shared data if requested
        if include_data:
            console.print("[cyan]Bundling shared data (browser profiles, configs)...[/]")
            data_dir = temp_dir / "data"
            data_dir.mkdir()

            # Load config to get paths
            if config_file.exists():
                config = load_clonebox_config(config_file)
                all_paths = config.get("paths", {}).copy()
                all_paths.update(config.get("app_data_paths", {}))

                for idx, (host_path, guest_path) in enumerate(all_paths.items()):
                    host_p = Path(host_path)
                    if host_p.exists():
                        dest = data_dir / f"mount{idx}"
                        console.print(f"  [dim]Copying {host_path}...[/]")
                        try:
                            if host_p.is_dir():
                                shutil.copytree(host_p, dest, symlinks=True,
                                                ignore=shutil.ignore_patterns('*.pyc', '__pycache__', '.git'))
                            else:
                                shutil.copy2(host_p, dest)
                        except Exception as e:
                            console.print(f"  [yellow]⚠️ Skipped {host_path}: {e}[/]")

                # Save path mapping
                import json
                (data_dir / "paths.json").write_text(json.dumps(all_paths, indent=2))

        # Create tarball
        console.print(f"[cyan]Creating archive: {export_path}[/]")
        with tarfile.open(export_path, "w:gz") as tar:
            tar.add(temp_dir, arcname=name)

        # Get size
        size_mb = export_path.stat().st_size / 1024 / 1024

        console.print("\n[bold green]✅ Export complete![/]")
        console.print(f"  File: [cyan]{export_path}[/]")
        console.print(f"  Size: [cyan]{size_mb:.1f} MB[/]")
        console.print("\n[bold]To import on another workstation:[/]")
        console.print(f"  [cyan]clonebox import {export_path.name}[/]")

    finally:
        # Cleanup
        if temp_dir.exists():
            shutil.rmtree(temp_dir)

        # Restart VM
        console.print("\n[cyan]Restarting VM...[/]")
        subprocess.run(
            ["virsh", "--connect", conn_uri, "start", name],
            capture_output=True, timeout=30
        )

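# --- Editor's sketch (not part of the released module): the archive layout
# used by cmd_export/cmd_import above, reduced to a minimal tarfile round-trip.
# The arcname argument puts every file under a single <vm-name>/ prefix, which
# is what cmd_import relies on when it takes the first entry of the extracted
# directory. Names prefixed with _example_ are hypothetical.
def _example_pack_vm(vm_dir, archive, vm_name):
    import tarfile
    with tarfile.open(archive, "w:gz") as tar:
        tar.add(vm_dir, arcname=vm_name)


def _example_unpack_vm(archive, dest):
    import tarfile
    from pathlib import Path
    dest = Path(dest)
    dest.mkdir(parents=True, exist_ok=True)  # assumes dest starts empty
    with tarfile.open(archive, "r:gz") as tar:
        tar.extractall(dest)
    return next(dest.iterdir())  # the single <vm-name>/ directory
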
def cmd_import(args):
    """Import VM from export archive."""
    import subprocess
    import tarfile
    import shutil

    archive_path = Path(args.archive).resolve()
    user_session = getattr(args, "user", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"

    if not archive_path.exists():
        console.print(f"[red]❌ Archive not found: {archive_path}[/]")
        return

    console.print(f"[bold cyan]📥 Importing VM from: {archive_path}[/]\n")

    # Determine storage path
    if user_session:
        storage_base = Path.home() / ".local/share/libvirt/images"
    else:
        storage_base = Path("/var/lib/libvirt/images")

    storage_base.mkdir(parents=True, exist_ok=True)

    temp_dir = Path(f"/tmp/clonebox-import-{archive_path.stem}")

    try:
        # Extract archive
        console.print("[cyan]Extracting archive...[/]")
        if temp_dir.exists():
            shutil.rmtree(temp_dir)
        temp_dir.mkdir(parents=True)

        with tarfile.open(archive_path, "r:gz") as tar:
            tar.extractall(temp_dir)

        # Find extracted VM directory
        vm_dirs = list(temp_dir.iterdir())
        if not vm_dirs:
            console.print("[red]❌ Empty archive[/]")
            return

        extracted_dir = vm_dirs[0]
        vm_name = extracted_dir.name

        console.print(f"[cyan]VM Name: {vm_name}[/]")

        # Create VM storage directory
        vm_storage = storage_base / vm_name
        if vm_storage.exists():
            if not getattr(args, "replace", False):
                console.print(f"[red]❌ VM '{vm_name}' already exists. Use --replace to overwrite.[/]")
                return
            shutil.rmtree(vm_storage)

        vm_storage.mkdir(parents=True)

        # Copy disk image
        console.print("[cyan]Copying disk image...[/]")
        disk_src = extracted_dir / "disk.qcow2"
        if disk_src.exists():
            shutil.copy2(disk_src, vm_storage / f"{vm_name}.qcow2")

        # Copy cloud-init ISO
        cloudinit_src = extracted_dir / "cloud-init.iso"
        if cloudinit_src.exists():
            shutil.copy2(cloudinit_src, vm_storage / "cloud-init.iso")

        # Copy config files to current directory
        config_src = extracted_dir / ".clonebox.yaml"
        if config_src.exists():
            shutil.copy2(config_src, Path.cwd() / ".clonebox.yaml")
            console.print("[green]✅ Copied .clonebox.yaml[/]")

        env_src = extracted_dir / ".env"
        if env_src.exists():
            shutil.copy2(env_src, Path.cwd() / ".env")
            console.print("[green]✅ Copied .env[/]")

        # Restore data if included
        data_dir = extracted_dir / "data"
        if data_dir.exists():
            import json
            paths_file = data_dir / "paths.json"
            if paths_file.exists():
                paths_mapping = json.loads(paths_file.read_text())
                console.print("\n[cyan]Restoring shared data...[/]")

                for idx, (host_path, guest_path) in enumerate(paths_mapping.items()):
                    src = data_dir / f"mount{idx}"
                    if src.exists():
                        dest = Path(host_path)
                        console.print(f"  [dim]Restoring to {host_path}...[/]")
                        try:
                            if dest.exists():
                                console.print("  [yellow]⚠️ Skipped (already exists)[/]")
                            else:
                                dest.parent.mkdir(parents=True, exist_ok=True)
                                if src.is_dir():
                                    shutil.copytree(src, dest)
                                else:
                                    shutil.copy2(src, dest)
                        except Exception as e:
                            console.print(f"  [yellow]⚠️ Error: {e}[/]")

        # Modify and define VM XML
        console.print("\n[cyan]Defining VM...[/]")
        xml_src = extracted_dir / "vm.xml"
        if xml_src.exists():
            xml_content = xml_src.read_text()

            # Update paths in XML to new storage location
            # This is a simple replacement - may need more sophisticated handling
            xml_content = xml_content.replace(
                "/home/", f"{Path.home()}/"
            )

            # Write modified XML
            modified_xml = temp_dir / "vm-modified.xml"
            modified_xml.write_text(xml_content)

            # Define VM
            result = subprocess.run(
                ["virsh", "--connect", conn_uri, "define", str(modified_xml)],
                capture_output=True, text=True, timeout=30
            )

            if result.returncode == 0:
                console.print(f"[green]✅ VM '{vm_name}' defined successfully![/]")
            else:
                console.print(f"[yellow]⚠️ VM definition warning: {result.stderr}[/]")

        console.print("\n[bold green]✅ Import complete![/]")
        console.print("\n[bold]To start the VM:[/]")
        console.print(f"  [cyan]clonebox start . {'--user' if user_session else ''}[/]")

    finally:
        # Cleanup
        if temp_dir.exists():
            shutil.rmtree(temp_dir)

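# --- Editor's sketch (not part of the released module): tar.extractall() in
# cmd_import trusts the archive it unpacks. On Python 3.12+ (and the recent
# patch releases of 3.8-3.11 that backported it) the stdlib accepts a filter
# argument that rejects absolute paths and "../" members; a hedged wrapper
# could opt in where available:
def _example_safe_extract(archive_path, dest):
    import tarfile
    with tarfile.open(archive_path, "r:gz") as tar:
        try:
            tar.extractall(dest, filter="data")  # sanitizes member paths
        except TypeError:
            # Older Python without the filter parameter: behaves like the
            # original call and should only be used on trusted archives.
            tar.extractall(dest)
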
def cmd_test(args):
    """Test VM configuration and health."""
    import subprocess
    import json
    from clonebox.validator import VMValidator

    # Import the QGA helpers once, up front: importing inside a branch below
    # would make these names function-locals that later checks (e.g. the mount
    # check) could hit while still unbound.
    try:
        from .cli import _qga_ping, _qga_exec
    except ImportError:
        from clonebox.cli import _qga_ping, _qga_exec

    name = args.name
    user_session = getattr(args, "user", False)
    quick = getattr(args, "quick", False)
    verbose = getattr(args, "verbose", False)
    validate_all = getattr(args, "validate", False)
    require_running_apps = getattr(args, "require_running_apps", False)
    smoke_test = getattr(args, "smoke_test", False)
    conn_uri = "qemu:///session" if user_session else "qemu:///system"

    # If name is a path, load config
    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
        target_path = Path(name).expanduser().resolve()
        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
        if not config_file.exists():
            console.print(f"[red]❌ Config not found: {config_file}[/]")
            return
    else:
        config_file = Path.cwd() / ".clonebox.yaml"
        if not config_file.exists():
            console.print("[red]❌ No .clonebox.yaml found in current directory[/]")
            return

    console.print(f"[bold cyan]🧪 Testing VM configuration: {config_file}[/]\n")

    # Load config
    try:
        config = load_clonebox_config(config_file)
        vm_name = config["vm"]["name"]
        console.print("[green]✅ Config loaded successfully[/]")
        console.print(f"  VM Name: {vm_name}")
        console.print(f"  RAM: {config['vm']['ram_mb']}MB")
        console.print(f"  vCPUs: {config['vm']['vcpus']}")
        console.print(f"  GUI: {'Yes' if config['vm']['gui'] else 'No'}")
    except Exception as e:
        console.print(f"[red]❌ Failed to load config: {e}[/]")
        return

    console.print()

    # Test 1: Check VM exists
    console.print("[bold]1. VM Existence Check[/]")
    try:
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "dominfo", vm_name],
            capture_output=True, text=True, timeout=10
        )
        if result.returncode == 0:
            console.print("[green]✅ VM is defined in libvirt[/]")
            if verbose:
                for line in result.stdout.split('\n'):
                    if ':' in line:
                        console.print(f"  {line}")
        else:
            console.print("[red]❌ VM not found in libvirt[/]")
            console.print("  Run: clonebox create .clonebox.yaml --start")
            return
    except Exception as e:
        console.print(f"[red]❌ Error checking VM: {e}[/]")
        return

    console.print()

    # Test 2: Check VM state
    console.print("[bold]2. VM State Check[/]")
    state = ""  # defined up front so the later checks never hit a NameError
    try:
        result = subprocess.run(
            ["virsh", "--connect", conn_uri, "domstate", vm_name],
            capture_output=True, text=True, timeout=10
        )
        state = result.stdout.strip()
        if state == "running":
            console.print("[green]✅ VM is running[/]")

            # Test network if running
            console.print("\n  Checking network...")
            try:
                result = subprocess.run(
                    ["virsh", "--connect", conn_uri, "domifaddr", vm_name],
                    capture_output=True, text=True, timeout=10
                )
                if "192.168" in result.stdout or "10.0" in result.stdout:
                    console.print("[green]✅ VM has network access[/]")
                    if verbose:
                        for line in result.stdout.split('\n'):
                            if '192.168' in line or '10.0' in line:
                                console.print(f"  IP: {line.split()[-1]}")
                else:
                    console.print("[yellow]⚠️ No IP address detected via virsh domifaddr[/]")
                    # Fallback: try to get IP via QEMU Guest Agent (useful for slirp/user networking)
                    if _qga_ping(vm_name, conn_uri):
                        try:
                            ip_out = _qga_exec(vm_name, conn_uri, "ip -4 -o addr show scope global | awk '{print $4}'", timeout=5)
                            if ip_out and ip_out.strip():
                                console.print(f"[green]✅ VM has network access (IP via QGA: {ip_out.strip()})[/]")
                            else:
                                console.print("[yellow]⚠️ IP not available via QGA[/]")
                        except Exception as e:
                            console.print(f"[yellow]⚠️ Could not get IP via QGA ({e})[/]")
                    else:
                        console.print("[dim]IP: QEMU Guest Agent not connected[/]")
            except Exception:
                console.print("[yellow]⚠️ Could not check network[/]")
        else:
            console.print(f"[yellow]⚠️ VM is not running (state: {state})[/]")
            console.print("  Run: clonebox start .")
    except Exception as e:
        console.print(f"[red]❌ Error checking VM state: {e}[/]")

    console.print()

    # Test 3: Check cloud-init status (if running)
    if not quick and state == "running":
        console.print("[bold]3. Cloud-init Status[/]")
        try:
            # Try to get cloud-init status via QEMU guest agent
            result = subprocess.run(
                ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
                 '{"execute":"guest-exec","arguments":{"path":"cloud-init","arg":["status"],"capture-output":true}}'],
                capture_output=True, text=True, timeout=15
            )
            if result.returncode == 0:
                try:
                    response = json.loads(result.stdout)
                    if "return" in response:
                        pid = response["return"]["pid"]
                        # Get output. Note: the braces around "pid" must be
                        # doubled in the f-string so they survive as literal
                        # JSON; single braces would be parsed as a format field.
                        result2 = subprocess.run(
                            ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
                             f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
                            capture_output=True, text=True, timeout=15
                        )
                        if result2.returncode == 0:
                            resp2 = json.loads(result2.stdout)
                            if "return" in resp2 and resp2["return"]["exited"]:
                                output = resp2["return"]["out-data"]
                                if output:
                                    import base64
                                    status = base64.b64decode(output).decode()
                                    if "done" in status.lower():
                                        console.print("[green]✅ Cloud-init completed[/]")
                                    elif "running" in status.lower():
                                        console.print("[yellow]⚠️ Cloud-init still running[/]")
                                    else:
                                        console.print(f"[yellow]⚠️ Cloud-init status: {status.strip()}[/]")
                except Exception:
                    pass
        except Exception:
            console.print("[yellow]⚠️ Could not check cloud-init (QEMU agent may not be running)[/]")

        console.print()

    # Test 4: Check mounts (if running)
    if not quick and state == "running":
        console.print("[bold]4. Mount Points Check[/]")
        all_paths = config.get("paths", {}).copy()
        all_paths.update(config.get("app_data_paths", {}))

        if all_paths:
            for host_path, guest_path in all_paths.items():
                try:
                    # Use the same QGA helper as diagnose/status
                    is_accessible = _qga_exec(vm_name, conn_uri, f"test -d {guest_path} && echo yes || echo no", timeout=5)
                    if is_accessible == "yes":
                        console.print(f"[green]✅ {guest_path}[/]")
                    else:
                        console.print(f"[red]❌ {guest_path} (not accessible)[/]")
                except Exception:
                    console.print(f"[yellow]⚠️ {guest_path} (could not check)[/]")
        else:
            console.print("[dim]No mount points configured[/]")

        console.print()

    # Test 5: Run health check (if running and not quick)
    if not quick and state == "running":
        console.print("[bold]5. Health Check[/]")
        try:
            result = subprocess.run(
                ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
                 '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}'],
                capture_output=True, text=True, timeout=60
            )
            if result.returncode == 0:
                console.print("[green]✅ Health check triggered[/]")
                console.print("  View results in VM: cat /var/log/clonebox-health.log")
            else:
                console.print("[yellow]⚠️ Health check script not found[/]")
                console.print("  VM may not have been created with health checks")
        except Exception as e:
            console.print(f"[yellow]⚠️ Could not run health check: {e}[/]")

        console.print()

    # Run full validation if requested
    if validate_all and state == "running":
        validator = VMValidator(
            config,
            vm_name,
            conn_uri,
            console,
            require_running_apps=require_running_apps,
            smoke_test=smoke_test,
        )
        results = validator.validate_all()

        # Exit with error code if validations failed
        if results["overall"] == "partial":
            return 1
    else:
        # Summary
        console.print("[bold]Test Summary[/]")
        console.print("VM configuration is valid and VM is accessible.")
        console.print("\n[dim]For full validation including packages, services, and mounts:[/]")
        console.print("[dim]  clonebox test . --user --validate[/]")
        console.print("\n[dim]For detailed health report, run in VM:[/]")
        console.print("[dim]  cat /var/log/clonebox-health.log[/]")

    return 0

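# --- Editor's sketch (not part of the released module): the guest-exec /
# guest-exec-status handshake used in cmd_test, with the JSON payloads built
# by json.dumps instead of hand-escaped f-strings. Helper name hypothetical.
def _example_qga_run(vm_name, conn_uri, path, argv=()):
    import base64
    import json
    import subprocess
    import time

    def qga(payload):
        # virsh qemu-agent-command takes the request as a JSON string and
        # prints the agent's JSON response on stdout.
        out = subprocess.run(
            ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
             json.dumps(payload)],
            capture_output=True, text=True, timeout=15, check=True,
        )
        return json.loads(out.stdout)

    pid = qga({"execute": "guest-exec",
               "arguments": {"path": path, "arg": list(argv),
                             "capture-output": True}})["return"]["pid"]
    while True:
        status = qga({"execute": "guest-exec-status",
                      "arguments": {"pid": pid}})["return"]
        if status.get("exited"):
            return base64.b64decode(status.get("out-data", "")).decode()
        time.sleep(0.5)
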
CLONEBOX_CONFIG_FILE = ".clonebox.yaml"
CLONEBOX_ENV_FILE = ".env"

def load_env_file(env_path: Path) -> dict:
    """Load environment variables from .env file."""
    env_vars = {}
    if not env_path.exists():
        return env_vars

    with open(env_path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            if '=' in line:
                key, value = line.split('=', 1)
                env_vars[key.strip()] = value.strip()

    return env_vars

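# --- Editor's sketch (not part of the released module): what load_env_file
# accepts. Blank lines and "#" comments are skipped; only the first "=" splits,
# so values may themselves contain "=". File contents below are hypothetical.
def _example_load_env_file():
    import tempfile
    from pathlib import Path
    with tempfile.TemporaryDirectory() as tmp:
        env = Path(tmp) / ".env"
        env.write_text("# credentials\nVM_PASSWORD=s3cret=42\n\nVM_USER=ubuntu\n")
        assert load_env_file(env) == {"VM_PASSWORD": "s3cret=42", "VM_USER": "ubuntu"}
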
def expand_env_vars(value, env_vars: dict):
    """Expand environment variables in string values like ${VAR_NAME}."""
    if isinstance(value, str):
        # Replace ${VAR_NAME} with value from env_vars or os.environ
        def replacer(match):
            var_name = match.group(1)
            return env_vars.get(var_name, os.environ.get(var_name, match.group(0)))
        return re.sub(r'\$\{([^}]+)\}', replacer, value)
    elif isinstance(value, dict):
        return {k: expand_env_vars(v, env_vars) for k, v in value.items()}
    elif isinstance(value, list):
        return [expand_env_vars(item, env_vars) for item in value]
    return value

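# --- Editor's sketch (not part of the released module): expand_env_vars walks
# nested dicts/lists and leaves unknown ${...} references untouched, so a
# missing variable surfaces verbatim in the config rather than as an error.
def _example_expand_env_vars():
    cfg = {"vm": {"password": "${VM_PASSWORD}", "tags": ["${MISSING}"]}}
    out = expand_env_vars(cfg, {"VM_PASSWORD": "hunter2"})
    # Assumes MISSING is not set in os.environ, which is the second fallback.
    assert out == {"vm": {"password": "hunter2", "tags": ["${MISSING}"]}}
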
def deduplicate_list(items: list, key=None) -> list:
    """Remove duplicates from list, preserving order."""
    seen = set()
    result = []
    for item in items:
        k = key(item) if key else item
        if k not in seen:
            seen.add(k)
            result.append(item)
    return result

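# --- Editor's sketch (not part of the released module): order-preserving
# dedupe, with the optional key callable deciding what counts as a duplicate.
def _example_deduplicate_list():
    assert deduplicate_list(["git", "vim", "git"]) == ["git", "vim"]
    # With a key, the *first* item wins among case-insensitive duplicates.
    assert deduplicate_list(["Git", "git", "vim"], key=str.lower) == ["Git", "vim"]
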
def generate_clonebox_yaml(
    snapshot,
    detector,
    deduplicate: bool = True,
    target_path: Optional[str] = None,
    vm_name: Optional[str] = None,
    network_mode: str = "auto",
    base_image: Optional[str] = None,
    disk_size_gb: Optional[int] = None,
) -> str:
    """Generate YAML config from system snapshot."""
    sys_info = detector.get_system_info()

    # Services that should NOT be cloned to VM (host-specific)
    VM_EXCLUDED_SERVICES = {
        "libvirtd", "virtlogd", "libvirt-guests", "qemu-guest-agent",
        "bluetooth", "bluez", "upower", "thermald", "tlp", "power-profiles-daemon",
        "gdm", "gdm3", "sddm", "lightdm",
        "snap.cups.cups-browsed", "snap.cups.cupsd",
        "ModemManager", "wpa_supplicant",
        "accounts-daemon", "colord", "switcheroo-control",
    }

    # Collect services (excluding host-specific ones)
    services = [s.name for s in snapshot.running_services if s.name not in VM_EXCLUDED_SERVICES]
    if deduplicate:
        services = deduplicate_list(services)

    # Collect paths with types
    paths_by_type = {"project": [], "config": [], "data": []}
    for p in snapshot.paths:
        if p.type in paths_by_type:
            paths_by_type[p.type].append(p.path)

    if deduplicate:
        for ptype in paths_by_type:
            paths_by_type[ptype] = deduplicate_list(paths_by_type[ptype])

    # Collect working directories from running apps
    working_dirs = []
    for app in snapshot.applications:
        if app.working_dir and app.working_dir != "/" and app.working_dir.startswith("/home"):
            working_dirs.append(app.working_dir)

    if deduplicate:
        working_dirs = deduplicate_list(working_dirs)

    # If target_path specified, prioritize it
    if target_path:
        target_path = Path(target_path).resolve()
        target_str = str(target_path)
        if target_str not in paths_by_type["project"]:
            paths_by_type["project"].insert(0, target_str)

    # Build paths mapping
    paths_mapping = {}
    idx = 0
    for host_path in paths_by_type["project"][:5]:  # Limit projects
        paths_mapping[host_path] = f"/mnt/project{idx}"
        idx += 1

    for host_path in working_dirs[:3]:  # Limit working dirs
        if host_path not in paths_mapping:
            paths_mapping[host_path] = f"/mnt/workdir{idx}"
            idx += 1

    # Add default user folders (Downloads, Documents)
    home_dir = Path.home()
    default_folders = [
        (home_dir / "Downloads", "/home/ubuntu/Downloads"),
        (home_dir / "Documents", "/home/ubuntu/Documents"),
    ]
    for host_folder, guest_folder in default_folders:
        if host_folder.exists() and str(host_folder) not in paths_mapping:
            paths_mapping[str(host_folder)] = guest_folder

    # Detect and add app-specific data directories for running applications.
    # This includes browser profiles, IDE settings, credentials, extensions, etc.
    app_data_dirs = detector.detect_app_data_dirs(snapshot.applications)
    app_data_mapping = {}
    for app_data in app_data_dirs:
        host_path = app_data["path"]
        if host_path not in paths_mapping:
            # Map to same relative path in VM user home
            rel_path = host_path.replace(str(home_dir), "").lstrip("/")
            guest_path = f"/home/ubuntu/{rel_path}"
            app_data_mapping[host_path] = guest_path

    post_commands = []

    chrome_profile = home_dir / ".config" / "google-chrome"
    if chrome_profile.exists():
        host_path = str(chrome_profile)
        if host_path not in paths_mapping and host_path not in app_data_mapping:
            app_data_mapping[host_path] = "/home/ubuntu/.config/google-chrome"

        post_commands.append(
            "command -v google-chrome >/dev/null 2>&1 || ("
            "curl -fsSL -o /tmp/google-chrome.deb https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb && "
            "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y /tmp/google-chrome.deb"
            ")"
        )

    # Determine VM name
    if not vm_name:
        if target_path:
            vm_name = f"clone-{target_path.name}"
        else:
            vm_name = f"clone-{sys_info['hostname']}"

    # Calculate recommended resources
    ram_mb = min(4096, int(sys_info["memory_available_gb"] * 1024 * 0.5))
    vcpus = max(2, sys_info["cpu_count"] // 2)

    if disk_size_gb is None:
        disk_size_gb = 20

    # Auto-detect packages from running applications and services
    app_packages = detector.suggest_packages_for_apps(snapshot.applications)
    service_packages = detector.suggest_packages_for_services(snapshot.running_services)

    # Combine with base packages (apt only)
    base_packages = [
        "build-essential",
        "git",
        "curl",
        "vim",
    ]

    # Merge apt packages and deduplicate
    all_apt_packages = base_packages + app_packages["apt"] + service_packages["apt"]
    if deduplicate:
        all_apt_packages = deduplicate_list(all_apt_packages)

    # Merge snap packages and deduplicate
    all_snap_packages = app_packages["snap"] + service_packages["snap"]
    if deduplicate:
        all_snap_packages = deduplicate_list(all_snap_packages)

    if chrome_profile.exists() and "google-chrome" not in [d.get("app", "") for d in app_data_dirs]:
        if "chromium" not in all_snap_packages:
            all_snap_packages.append("chromium")

    if "pycharm-community" in all_snap_packages:
        remapped = {}
        for host_path, guest_path in app_data_mapping.items():
            if guest_path == "/home/ubuntu/.config/JetBrains":
                remapped[host_path] = "/home/ubuntu/snap/pycharm-community/common/.config/JetBrains"
            elif guest_path == "/home/ubuntu/.local/share/JetBrains":
                remapped[host_path] = "/home/ubuntu/snap/pycharm-community/common/.local/share/JetBrains"
            elif guest_path == "/home/ubuntu/.cache/JetBrains":
                remapped[host_path] = "/home/ubuntu/snap/pycharm-community/common/.cache/JetBrains"
            else:
                remapped[host_path] = guest_path
        app_data_mapping = remapped

    if "firefox" in all_apt_packages:
        remapped = {}
        for host_path, guest_path in app_data_mapping.items():
            if guest_path == "/home/ubuntu/.mozilla/firefox":
                remapped[host_path] = "/home/ubuntu/snap/firefox/common/.mozilla/firefox"
            elif guest_path == "/home/ubuntu/.cache/mozilla/firefox":
                remapped[host_path] = "/home/ubuntu/snap/firefox/common/.cache/mozilla/firefox"
            else:
                remapped[host_path] = guest_path
        app_data_mapping = remapped

    # Build config
    config = {
        "version": "1",
        "generated": datetime.now().isoformat(),
        "vm": {
            "name": vm_name,
            "ram_mb": ram_mb,
            "vcpus": vcpus,
            "disk_size_gb": disk_size_gb,
            "gui": True,
            "base_image": base_image,
            "network_mode": network_mode,
            "username": "ubuntu",
            "password": "${VM_PASSWORD}",
        },
        "services": services,
        "packages": all_apt_packages,
        "snap_packages": all_snap_packages,
        "post_commands": post_commands,
        "paths": paths_mapping,
        "app_data_paths": app_data_mapping,  # App-specific config/data directories
        "detected": {
            "running_apps": [
                {"name": a.name, "cwd": a.working_dir or "", "memory_mb": round(a.memory_mb)}
                for a in snapshot.applications[:10]
            ],
            "app_data_dirs": [
                {"path": d["path"], "app": d["app"], "size_mb": d["size_mb"]}
                for d in app_data_dirs[:15]
            ],
            "all_paths": {
                "projects": list(paths_by_type["project"]),
                "configs": list(paths_by_type["config"][:5]),
                "data": list(paths_by_type["data"][:5]),
            },
        },
    }

    return yaml.dump(config, default_flow_style=False, allow_unicode=True, sort_keys=False)

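# --- Editor's note: representative shape of the YAML emitted above. Keys come
# from the config dict built in generate_clonebox_yaml; the values here are
# illustrative only.
#
#   version: '1'
#   generated: '2024-01-01T12:00:00'
#   vm:
#     name: clone-myproject
#     ram_mb: 4096
#     vcpus: 4
#     disk_size_gb: 20
#     gui: true
#     base_image: null
#     network_mode: auto
#     username: ubuntu
#     password: ${VM_PASSWORD}
#   services: [docker]
#   packages: [build-essential, git, curl, vim]
#   snap_packages: [chromium]
#   post_commands: []
#   paths:
#     /home/me/myproject: /mnt/project0
#   app_data_paths:
#     /home/me/.config/google-chrome: /home/ubuntu/.config/google-chrome
#   detected: {...}
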
def load_clonebox_config(path: Path) -> dict:
    """Load .clonebox.yaml config file and expand environment variables from .env."""
    config_file = path / CLONEBOX_CONFIG_FILE if path.is_dir() else path

    if not config_file.exists():
        raise FileNotFoundError(f"Config file not found: {config_file}")

    # Load .env file from same directory
    config_dir = config_file.parent
    env_file = config_dir / CLONEBOX_ENV_FILE
    env_vars = load_env_file(env_file)

    # Load YAML config
    with open(config_file) as f:
        config = yaml.safe_load(f)

    # Expand environment variables in config
    config = expand_env_vars(config, env_vars)

    return config

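# --- Editor's sketch (not part of the released module): the .clonebox.yaml +
# .env pairing end to end. A ${VM_PASSWORD} placeholder in the YAML is filled
# from the sibling .env at load time; file contents are hypothetical.
def _example_load_clonebox_config():
    import tempfile
    from pathlib import Path
    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp)
        (root / ".clonebox.yaml").write_text(
            "vm:\n  name: demo\n  password: ${VM_PASSWORD}\n"
        )
        (root / ".env").write_text("VM_PASSWORD=hunter2\n")
        config = load_clonebox_config(root)
        assert config["vm"]["password"] == "hunter2"
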
def monitor_cloud_init_status(vm_name: str, user_session: bool = False, timeout: int = 900):
    """Monitor cloud-init status in VM and show progress."""
    import subprocess
    import time

    conn_uri = "qemu:///session" if user_session else "qemu:///system"
    start_time = time.time()
    shutdown_count = 0  # Count consecutive shutdown detections
    restart_detected = False

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        task = progress.add_task("[cyan]Starting VM and initializing...", total=None)

        while time.time() - start_time < timeout:
            try:
                elapsed = int(time.time() - start_time)
                minutes = elapsed // 60
                seconds = elapsed % 60

                # Check VM state
                result = subprocess.run(
                    ["virsh", "--connect", conn_uri, "domstate", vm_name],
                    capture_output=True,
                    text=True,
                    timeout=5
                )

                vm_state = result.stdout.strip().lower()

                if "shut off" in vm_state or "shutting down" in vm_state:
                    # VM is shutting down - count consecutive detections
                    shutdown_count += 1
                    if shutdown_count >= 3 and not restart_detected:
                        # Confirmed shutdown after 3 consecutive checks
                        restart_detected = True
                        progress.update(task, description="[yellow]⟳ VM restarting after package installation...")
                    time.sleep(3)
                    continue
                else:
                    # VM is running - reset shutdown counter
                    if shutdown_count > 0 and shutdown_count < 3:
                        # Was a brief glitch, not a real shutdown
                        shutdown_count = 0

                if restart_detected and "running" in vm_state and shutdown_count >= 3:
                    # VM restarted successfully - GUI should be ready
                    progress.update(task, description=f"[green]✓ GUI ready! Total time: {minutes}m {seconds}s")
                    time.sleep(2)
                    break

                # Estimate remaining time (total ~12-15 minutes for full desktop install)
                if elapsed < 60:
                    remaining = "~12-15 minutes"
                elif elapsed < 300:
                    remaining = f"~{12 - minutes} minutes"
                elif elapsed < 600:
                    remaining = f"~{10 - minutes} minutes"
                elif elapsed < 800:
                    remaining = "finishing soon..."
                else:
                    remaining = "almost done"

                if restart_detected:
                    progress.update(task, description=f"[cyan]Starting GUI... ({minutes}m {seconds}s, {remaining})")
                else:
                    progress.update(task, description=f"[cyan]Installing desktop packages... ({minutes}m {seconds}s, {remaining})")

            except Exception:
                # Covers subprocess.TimeoutExpired and anything else transient.
                elapsed = int(time.time() - start_time)
                minutes = elapsed // 60
                seconds = elapsed % 60
                progress.update(task, description=f"[cyan]Configuring VM... ({minutes}m {seconds}s)")

            time.sleep(3)

        # Final status
        if time.time() - start_time >= timeout:
            progress.update(task, description="[yellow]⚠ Monitoring timeout - VM continues in background")

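# --- Editor's sketch (not part of the released module): the debounce idea in
# the loop above, isolated. A state only "counts" once it is observed on n
# consecutive polls, which filters out transient virsh readings. The poll
# callable (e.g. one that returns virsh domstate output) is an assumption.
def _example_debounced(poll, target, n=3, interval=3.0, timeout=60.0):
    import time
    hits, start = 0, time.time()
    while time.time() - start < timeout:
        hits = hits + 1 if poll() == target else 0
        if hits >= n:
            return True
        time.sleep(interval)
    return False
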
def create_vm_from_config(
    config: dict,
    start: bool = False,
    user_session: bool = False,
    replace: bool = False,
) -> str:
    """Create VM from YAML config dict."""
    # Merge paths and app_data_paths
    all_paths = config.get("paths", {}).copy()
    all_paths.update(config.get("app_data_paths", {}))

    vm_config = VMConfig(
        name=config["vm"]["name"],
        ram_mb=config["vm"].get("ram_mb", 4096),
        vcpus=config["vm"].get("vcpus", 4),
        disk_size_gb=config["vm"].get("disk_size_gb", 10),
        gui=config["vm"].get("gui", True),
        base_image=config["vm"].get("base_image"),
        paths=all_paths,
        packages=config.get("packages", []),
        snap_packages=config.get("snap_packages", []),
        services=config.get("services", []),
        post_commands=config.get("post_commands", []),
        user_session=user_session,
        network_mode=config["vm"].get("network_mode", "auto"),
        username=config["vm"].get("username", "ubuntu"),
        password=config["vm"].get("password", "ubuntu"),
    )

    cloner = SelectiveVMCloner(user_session=user_session)

    # Check prerequisites and show detailed info
    checks = cloner.check_prerequisites()

    if not checks["images_dir_writable"]:
        console.print(f"[yellow]⚠️ Storage directory: {checks['images_dir']}[/]")
        if "images_dir_error" in checks:
            console.print(f"[red]{checks['images_dir_error']}[/]")
        raise PermissionError(checks["images_dir_error"])

    console.print(f"[dim]Session: {checks['session_type']}, Storage: {checks['images_dir']}[/]")

    vm_uuid = cloner.create_vm(vm_config, console=console, replace=replace)

    if start:
        cloner.start_vm(vm_config.name, open_viewer=vm_config.gui, console=console)

        # Monitor cloud-init progress if GUI is enabled
        if vm_config.gui:
            console.print("\n[bold cyan]📊 Monitoring setup progress...[/]")
            try:
                monitor_cloud_init_status(vm_config.name, user_session=user_session)
            except KeyboardInterrupt:
                console.print("\n[yellow]Monitoring stopped. VM continues setup in background.[/]")
            except Exception as e:
                console.print(f"\n[dim]Note: Could not monitor status ({e}). VM continues setup in background.[/]")

    return vm_uuid

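# --- Editor's sketch (not part of the released module): the smallest config
# dict create_vm_from_config accepts. Only vm.name is strictly required; every
# other key shown falls back to the defaults read above. Values hypothetical.
_EXAMPLE_MINIMAL_CONFIG = {
    "vm": {
        "name": "demo-vm",
        "ram_mb": 2048,
        "vcpus": 2,
        "gui": False,
    },
    "packages": ["git", "curl"],
    "paths": {"/home/me/project": "/mnt/project0"},
}
# create_vm_from_config(_EXAMPLE_MINIMAL_CONFIG, start=False, user_session=True)
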
def cmd_clone(args):
    """Generate clone config from path and optionally create VM."""
    target_path = Path(args.path).resolve()
    dry_run = getattr(args, "dry_run", False)

    if not target_path.exists():
        console.print(f"[red]❌ Path does not exist: {target_path}[/]")
        return

    if dry_run:
        console.print(f"[bold cyan]🔍 DRY RUN - Analyzing: {target_path}[/]\n")
    else:
        console.print(f"[bold cyan]📦 Generating clone config for: {target_path}[/]\n")

    # Detect system state
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    ) as progress:
        progress.add_task("Scanning system...", total=None)
        detector = SystemDetector()
        snapshot = detector.detect_all()

    # Generate config
    vm_name = args.name or f"clone-{target_path.name}"
    yaml_content = generate_clonebox_yaml(
        snapshot,
        detector,
        deduplicate=args.dedupe,
        target_path=str(target_path),
        vm_name=vm_name,
        network_mode=args.network,
        base_image=getattr(args, "base_image", None),
        disk_size_gb=getattr(args, "disk_size_gb", None),
    )

    profile_name = getattr(args, "profile", None)
    if profile_name:
        merged_config = merge_with_profile(yaml.safe_load(yaml_content), profile_name)
        if isinstance(merged_config, dict):
            vm_section = merged_config.get("vm")
            if isinstance(vm_section, dict):
                vm_packages = vm_section.pop("packages", None)
                if isinstance(vm_packages, list):
                    packages = merged_config.get("packages")
                    if not isinstance(packages, list):
                        packages = []
                    for p in vm_packages:
                        if p not in packages:
                            packages.append(p)
                    merged_config["packages"] = packages

            if "container" in merged_config:
                merged_config.pop("container", None)

            yaml_content = yaml.dump(
                merged_config,
                default_flow_style=False,
                allow_unicode=True,
                sort_keys=False,
            )

    # Dry run - show what would be created and exit
    if dry_run:
        config = yaml.safe_load(yaml_content)
        console.print(Panel(
            f"[bold]VM Name:[/] {config['vm']['name']}\n"
            f"[bold]RAM:[/] {config['vm'].get('ram_mb', 4096)} MB\n"
            f"[bold]vCPUs:[/] {config['vm'].get('vcpus', 4)}\n"
            f"[bold]Network:[/] {config['vm'].get('network_mode', 'auto')}\n"
            f"[bold]Paths:[/] {len(config.get('paths', {}))} mounts\n"
            f"[bold]Packages:[/] {len(config.get('packages', []))} packages\n"
            f"[bold]Services:[/] {len(config.get('services', []))} services",
            title="[bold cyan]Would create VM[/]",
            border_style="cyan",
        ))
        console.print("\n[dim]Config preview:[/]")
        console.print(Panel(yaml_content, title="[bold].clonebox.yaml[/]", border_style="dim"))
        console.print("\n[yellow]ℹ️ Dry run complete. No changes made.[/]")
        return

    # Save config file
    config_file = (
        target_path / CLONEBOX_CONFIG_FILE
        if target_path.is_dir()
        else target_path.parent / CLONEBOX_CONFIG_FILE
    )
    config_file.write_text(yaml_content)
    console.print(f"[green]✅ Config saved: {config_file}[/]\n")

    # Show config
    console.print(Panel(yaml_content, title="[bold].clonebox.yaml[/]", border_style="cyan"))

    # Open in editor if requested
    if args.edit:
        editor = os.environ.get("EDITOR", "nano")
        console.print(f"[cyan]Opening {editor}...[/]")
        os.system(f"{editor} {config_file}")
        # Reload after edit
        yaml_content = config_file.read_text()

    # Ask to create VM
    if args.run:
        create_now = True
    else:
        create_now = questionary.confirm(
            "Create VM with this config?", default=True, style=custom_style
        ).ask()

    if create_now:
        # Load config with environment variable expansion
        config = load_clonebox_config(config_file.parent)
        user_session = getattr(args, "user", False)

        console.print("\n[bold cyan]🔧 Creating VM...[/]\n")
        if user_session:
            console.print("[cyan]Using user session (qemu:///session) - no root required[/]")

        try:
            vm_uuid = create_vm_from_config(
                config,
                start=True,
                user_session=user_session,
                replace=getattr(args, "replace", False),
            )
            console.print(f"\n[bold green]🎉 VM '{config['vm']['name']}' is running![/]")
            console.print(f"[dim]UUID: {vm_uuid}[/]")

            # Show GUI startup info if GUI is enabled
            if config.get("vm", {}).get("gui", False):
                username = config['vm'].get('username', 'ubuntu')
                password = config['vm'].get('password', 'ubuntu')
                console.print("\n[bold yellow]⏰ GUI Setup Process:[/]")
                console.print("  [yellow]•[/] Installing desktop environment (~5-10 minutes)")
                console.print("  [yellow]•[/] Running health checks on all components")
                console.print("  [yellow]•[/] Automatic restart after installation")
                console.print("  [yellow]•[/] GUI login screen will appear")
                console.print(f"  [yellow]•[/] Login: [cyan]{username}[/] / [cyan]{'*' * len(password)}[/] (from .env)")
                console.print("\n[dim]💡 Progress will be monitored automatically below[/]")

            # Show health check info
            console.print("\n[bold]📊 Health Check (inside VM):[/]")
            console.print("  [cyan]cat /var/log/clonebox-health.log[/]     # View full report")
            console.print("  [cyan]cat /var/log/clonebox-health-status[/]  # Quick status")
            console.print("  [cyan]clonebox-health[/]                      # Re-run health check")

            # Show mount instructions
            all_paths = config.get("paths", {}).copy()
            all_paths.update(config.get("app_data_paths", {}))
            if all_paths:
                console.print("\n[bold]📁 Mounted paths (automatic):[/]")
                for host, guest in list(all_paths.items())[:5]:
                    console.print(f"  [dim]{host}[/] → [cyan]{guest}[/]")
                if len(all_paths) > 5:
                    console.print(f"  [dim]... and {len(all_paths) - 5} more paths[/]")
        except PermissionError as e:
            console.print(f"[red]❌ Permission Error:[/]\n{e}")
            console.print("\n[yellow]💡 Try running with --user flag:[/]")
            console.print(f"  [cyan]clonebox clone {target_path} --user[/]")
        except Exception as e:
            console.print(f"[red]❌ Error: {e}[/]")
    else:
        console.print("\n[dim]To create VM later, run:[/]")
        console.print(f"  [cyan]clonebox start {target_path}[/]")

def cmd_detect(args):
    """Detect and show system state."""
    console.print("[bold cyan]🔍 Detecting system state...[/]\n")

    detector = SystemDetector()
    snapshot = detector.detect_all()

    # JSON output
    if args.json:
        result = {
            "services": [{"name": s.name, "status": s.status} for s in snapshot.running_services],
            "applications": [
                {"name": a.name, "pid": a.pid, "cwd": a.working_dir} for a in snapshot.applications
            ],
            "paths": [
                {"path": p.path, "type": p.type, "size_mb": p.size_mb} for p in snapshot.paths
            ],
        }
        print(json.dumps(result, indent=2))
        return

    # YAML output
    if args.yaml:
        result = generate_clonebox_yaml(snapshot, detector, deduplicate=args.dedupe)

        if args.output:
            output_path = Path(args.output)
            output_path.write_text(result)
            console.print(f"[green]✅ Config saved to: {output_path}[/]")
        else:
            print(result)
        return

    # Services
    services = detector.detect_services()
    running = [s for s in services if s.status == "running"]

    if running:
        table = Table(title="Running Services", border_style="green")
        table.add_column("Service")
        table.add_column("Status")
        table.add_column("Enabled")

        for svc in running:
            table.add_row(svc.name, f"[green]{svc.status}[/]", "✓" if svc.enabled else "")

        console.print(table)

    # Applications
    apps = detector.detect_applications()

    if apps:
        console.print()
        table = Table(title="Running Applications", border_style="blue")
        table.add_column("Name")
        table.add_column("PID")
        table.add_column("Memory")
        table.add_column("Working Dir")

        for app in apps[:15]:
            table.add_row(
                app.name,
                str(app.pid),
                f"{app.memory_mb:.0f} MB",
                app.working_dir[:40] if app.working_dir else "",
            )

        console.print(table)

    # Paths
    paths = detector.detect_paths()

    if paths:
        console.print()
        table = Table(title="Detected Paths", border_style="yellow")
        table.add_column("Type")
        table.add_column("Path")
        table.add_column("Size")

        for p in paths[:20]:
            table.add_row(
                f"[cyan]{p.type}[/]", p.path, f"{p.size_mb:.0f} MB" if p.size_mb > 0 else "-"
            )

        console.print(table)

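# --- Editor's note: representative shape of `clonebox detect --json` output
# (field names from the result dict above; values illustrative):
#
#   {
#     "services": [{"name": "docker", "status": "running"}],
#     "applications": [{"name": "code", "pid": 4242, "cwd": "/home/me/proj"}],
#     "paths": [{"path": "/home/me/proj", "type": "project", "size_mb": 120.0}]
#   }
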
2483
|
+
def main():
|
|
2484
|
+
"""Main entry point."""
|
|
2485
|
+
parser = argparse.ArgumentParser(
|
|
2486
|
+
prog="clonebox", description="Clone your workstation environment to an isolated VM"
|
|
2487
|
+
)
|
|
2488
|
+
parser.add_argument("--version", action="version", version=f"clonebox {__version__}")
|
|
2489
|
+
|
|
2490
|
+
subparsers = parser.add_subparsers(dest="command", help="Commands")
|
|
2491
|
+
|
|
2492
|
+
# Interactive mode (default)
|
|
2493
|
+
parser.set_defaults(func=lambda args: interactive_mode())
|
|
2494
|
+
|
|
2495
|
+
# Create command
|
|
2496
|
+
create_parser = subparsers.add_parser("create", help="Create VM from config")
|
|
2497
|
+
create_parser.add_argument("--name", "-n", default="clonebox-vm", help="VM name")
|
|
2498
|
+
create_parser.add_argument(
|
|
2499
|
+
"--config",
|
|
2500
|
+
"-c",
|
|
2501
|
+
required=True,
|
|
2502
|
+
help='JSON config: {"paths": {}, "packages": [], "services": []}',
|
|
2503
|
+
)
|
|
2504
|
+
create_parser.add_argument("--ram", type=int, default=4096, help="RAM in MB")
|
|
2505
|
+
create_parser.add_argument("--vcpus", type=int, default=4, help="Number of vCPUs")
|
|
2506
|
+
create_parser.add_argument(
|
|
2507
|
+
"--disk-size-gb",
|
|
2508
|
+
type=int,
|
|
2509
|
+
default=10,
|
|
2510
|
+
help="Root disk size in GB (default: 10)",
|
|
2511
|
+
)
|
|
2512
|
+
create_parser.add_argument("--base-image", help="Path to base qcow2 image")
|
|
2513
|
+
create_parser.add_argument("--no-gui", action="store_true", help="Disable SPICE graphics")
|
|
2514
|
+
create_parser.add_argument("--start", "-s", action="store_true", help="Start VM after creation")
|
|
2515
|
+
create_parser.set_defaults(func=cmd_create)
|
|
2516
|
+
|
|
2517
|
+
# Start command
|
|
2518
|
+
start_parser = subparsers.add_parser("start", help="Start a VM")
|
|
2519
|
+
start_parser.add_argument(
|
|
2520
|
+
"name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
|
|
2521
|
+
)
|
|
2522
|
+
start_parser.add_argument("--no-viewer", action="store_true", help="Don't open virt-viewer")
|
|
2523
|
+
start_parser.add_argument("--viewer", action="store_true", help="Open virt-viewer GUI")
|
|
2524
|
+
start_parser.add_argument(
|
|
2525
|
+
"-u",
|
|
2526
|
+
"--user",
|
|
2527
|
+
action="store_true",
|
|
2528
|
+
help="Use user session (qemu:///session) - no root required",
|
|
2529
|
+
)
|
|
2530
|
+
start_parser.set_defaults(func=cmd_start)
|
|
2531
|
+
|
|
2532
|
+
# Open command - open VM viewer
|
|
2533
|
+
open_parser = subparsers.add_parser("open", help="Open VM viewer window")
|
|
2534
|
+
open_parser.add_argument(
|
|
2535
|
+
"name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
|
|
2536
|
+
)
|
|
2537
|
+
open_parser.add_argument(
|
|
2538
|
+
"-u",
|
|
2539
|
+
"--user",
|
|
2540
|
+
action="store_true",
|
|
2541
|
+
help="Use user session (qemu:///session) - no root required",
|
|
2542
|
+
)
|
|
2543
|
+
open_parser.set_defaults(func=cmd_open)
|
|
2544
|
+
|
|
2545
|
+
# Stop command
|
|
2546
|
+
stop_parser = subparsers.add_parser("stop", help="Stop a VM")
|
|
2547
|
+
stop_parser.add_argument("name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml")
|
|
2548
|
+
stop_parser.add_argument("--force", "-f", action="store_true", help="Force stop")
|
|
2549
|
+
stop_parser.add_argument(
|
|
2550
|
+
"-u",
|
|
2551
|
+
"--user",
|
|
2552
|
+
action="store_true",
|
|
2553
|
+
help="Use user session (qemu:///session) - no root required",
|
|
2554
|
+
)
|
|
2555
|
+
stop_parser.set_defaults(func=cmd_stop)
|
|
2556
|
+
|
|
2557
|
+
# Restart command
|
|
2558
|
+
restart_parser = subparsers.add_parser("restart", help="Restart a VM (stop and start)")
|
|
2559
|
+
restart_parser.add_argument("name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml")
|
|
2560
|
+
restart_parser.add_argument(
|
|
2561
|
+
"-f",
|
|
2562
|
+
"--force",
|
|
2563
|
+
action="store_true",
|
|
2564
|
+
help="Force stop if VM is stuck",
|
|
2565
|
+
)
|
|
2566
|
+
restart_parser.add_argument(
|
|
2567
|
+
"-u",
|
|
2568
|
+
"--user",
|
|
2569
|
+
action="store_true",
|
|
2570
|
+
help="Use user session (qemu:///session) - no root required",
|
|
2571
|
+
)
|
|
2572
|
+
restart_parser.add_argument(
|
|
2573
|
+
"--open",
|
|
2574
|
+
action="store_true",
|
|
2575
|
+
help="Open GUI after restart",
|
|
2576
|
+
)
|
|
2577
|
+
restart_parser.set_defaults(func=cmd_restart)
|
|
2578
|
+
|
|
2579
|
+
# Delete command
|
|
2580
|
+
delete_parser = subparsers.add_parser("delete", help="Delete a VM")
|
|
2581
|
+
delete_parser.add_argument("name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml")
|
|
2582
|
+
delete_parser.add_argument("--yes", "-y", action="store_true", help="Skip confirmation")
|
|
2583
|
+
delete_parser.add_argument("--keep-storage", action="store_true", help="Keep disk images")
|
|
2584
|
+
delete_parser.add_argument(
|
|
2585
|
+
"-u",
|
|
2586
|
+
"--user",
|
|
2587
|
+
action="store_true",
|
|
2588
|
+
help="Use user session (qemu:///session) - no root required",
|
|
2589
|
+
)
|
|
2590
|
+
delete_parser.set_defaults(func=cmd_delete)
|
|
2591
|
+
|
|
2592
|
+
# List command
|
|
2593
|
+
list_parser = subparsers.add_parser("list", aliases=["ls"], help="List VMs")
|
|
2594
|
+
list_parser.add_argument(
|
|
2595
|
+
"-u",
|
|
2596
|
+
"--user",
|
|
2597
|
+
action="store_true",
|
|
2598
|
+
help="Use user session (qemu:///session) - no root required",
|
|
2599
|
+
)
|
|
2600
|
+
list_parser.add_argument("--json", action="store_true", help="Output JSON")
|
|
2601
|
+
list_parser.set_defaults(func=cmd_list)
|
|
2602
|
+
|
|
2603
|
+
# Container command
|
|
2604
|
+
container_parser = subparsers.add_parser("container", help="Manage container sandboxes")
|
|
2605
|
+
container_parser.add_argument(
|
|
2606
|
+
"--engine",
|
|
2607
|
+
choices=["auto", "podman", "docker"],
|
|
2608
|
+
default="auto",
|
|
2609
|
+
help="Container engine: auto (default), podman, docker",
|
|
2610
|
+
)
|
|
2611
|
+
container_parser.set_defaults(func=lambda args, p=container_parser: p.print_help())
|
|
2612
|
+
container_sub = container_parser.add_subparsers(dest="container_command", help="Container commands")
|
|
2613
|
+
|
|
2614
|
+
container_up = container_sub.add_parser("up", help="Start container")
|
|
2615
|
+
container_up.add_argument(
|
|
2616
|
+
"--engine",
|
|
2617
|
+
choices=["auto", "podman", "docker"],
|
|
2618
|
+
default=argparse.SUPPRESS,
|
|
2619
|
+
help="Container engine: auto (default), podman, docker",
|
|
2620
|
+
)
|
|
2621
|
+
container_up.add_argument("path", nargs="?", default=".", help="Workspace path")
|
|
2622
|
+
container_up.add_argument("--name", help="Container name")
|
|
2623
|
+
container_up.add_argument("--image", default="ubuntu:22.04", help="Container image")
|
|
2624
|
+
container_up.add_argument("--detach", action="store_true", help="Run container in background")
|
|
2625
|
+
container_up.add_argument(
|
|
2626
|
+
"--profile",
|
|
2627
|
+
help="Profile name (loads ~/.clonebox.d/<name>.yaml, .clonebox.d/<name>.yaml, or built-in templates)",
|
|
2628
|
+
)
|
|
2629
|
+
container_up.add_argument(
|
|
2630
|
+
"--mount",
|
|
2631
|
+
action="append",
|
|
2632
|
+
default=[],
|
|
2633
|
+
help="Extra mount HOST:CONTAINER (repeatable)",
|
|
2634
|
+
)
|
|
2635
|
+
container_up.add_argument(
|
|
2636
|
+
"--port",
|
|
2637
|
+
action="append",
|
|
2638
|
+
default=[],
|
|
2639
|
+
help="Port mapping (e.g. 8080:80) (repeatable)",
|
|
2640
|
+
)
|
|
2641
|
+
container_up.add_argument(
|
|
2642
|
+
"--package",
|
|
2643
|
+
action="append",
|
|
2644
|
+
default=[],
|
|
2645
|
+
help="APT package to install in image (repeatable)",
|
|
2646
|
+
)
|
|
2647
|
+
container_up.add_argument(
|
|
2648
|
+
"--no-dotenv",
|
|
2649
|
+
action="store_true",
|
|
2650
|
+
help="Do not load env vars from workspace .env",
|
|
2651
|
+
)
|
|
2652
|
+
container_up.set_defaults(func=cmd_container_up)
|
|
2653
|
+
|
|
2654
|
+

    container_ps = container_sub.add_parser("ps", aliases=["ls"], help="List containers")
    container_ps.add_argument(
        "--engine",
        choices=["auto", "podman", "docker"],
        default=argparse.SUPPRESS,
        help="Container engine: auto (default), podman, docker",
    )
    container_ps.add_argument("-a", "--all", action="store_true", help="Show all containers")
    container_ps.add_argument("--json", action="store_true", help="Output JSON")
    container_ps.set_defaults(func=cmd_container_ps)

    container_stop = container_sub.add_parser("stop", help="Stop container")
    container_stop.add_argument(
        "--engine",
        choices=["auto", "podman", "docker"],
        default=argparse.SUPPRESS,
        help="Container engine: auto (default), podman, docker",
    )
    container_stop.add_argument("name", help="Container name")
    container_stop.set_defaults(func=cmd_container_stop)

    container_rm = container_sub.add_parser("rm", help="Remove container")
    container_rm.add_argument(
        "--engine",
        choices=["auto", "podman", "docker"],
        default=argparse.SUPPRESS,
        help="Container engine: auto (default), podman, docker",
    )
    container_rm.add_argument("name", help="Container name")
    container_rm.add_argument("-f", "--force", action="store_true", help="Force remove")
    container_rm.set_defaults(func=cmd_container_rm)

    container_down = container_sub.add_parser("down", help="Stop and remove container")
    container_down.add_argument(
        "--engine",
        choices=["auto", "podman", "docker"],
        default=argparse.SUPPRESS,
        help="Container engine: auto (default), podman, docker",
    )
    container_down.add_argument("name", help="Container name")
    container_down.set_defaults(func=cmd_container_down)
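    # Example lifecycle (illustrative; "mybox" is a placeholder name):
    #   clonebox container ps -a
    #   clonebox container stop mybox
    #   clonebox container down mybox   # stop + remove in one step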

    # Dashboard command
    dashboard_parser = subparsers.add_parser("dashboard", help="Run local dashboard")
    dashboard_parser.add_argument("--port", type=int, default=8080, help="Port to bind (default: 8080)")
    dashboard_parser.set_defaults(func=cmd_dashboard)
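    # Example (illustrative): clonebox dashboard --port 9090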

    # Detect command
    detect_parser = subparsers.add_parser("detect", help="Detect system state")
    detect_parser.add_argument("--json", action="store_true", help="Output as JSON")
    detect_parser.add_argument("--yaml", action="store_true", help="Output as YAML config")
    detect_parser.add_argument("--dedupe", action="store_true", help="Remove duplicate entries")
    detect_parser.add_argument("-o", "--output", help="Save output to file")
    detect_parser.set_defaults(func=cmd_detect)
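    # Example (illustrative; output filename is a placeholder):
    #   clonebox detect --yaml --dedupe -o detected.yaml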

    # Clone command
    clone_parser = subparsers.add_parser("clone", help="Generate clone config from path")
    clone_parser.add_argument(
        "path", nargs="?", default=".", help="Path to clone (default: current dir)"
    )
    clone_parser.add_argument("--name", "-n", help="VM name (default: directory name)")
    clone_parser.add_argument(
        "--run", "-r", action="store_true", help="Create and start VM immediately"
    )
    clone_parser.add_argument(
        "--edit", "-e", action="store_true", help="Open config in editor before creating"
    )
    clone_parser.add_argument(
        "--dedupe", action="store_true", default=True, help="Remove duplicate entries"
    )
    clone_parser.add_argument(
        "--user",
        "-u",
        action="store_true",
        help="Use user session (qemu:///session) - no root required, stores in ~/.local/share/libvirt/",
    )
    clone_parser.add_argument(
        "--network",
        choices=["auto", "default", "user"],
        default="auto",
        help="Network mode: auto (default), default (libvirt network), user (slirp)",
    )
    clone_parser.add_argument(
        "--base-image",
        help="Path to a bootable qcow2 image to use as a base disk",
    )
    clone_parser.add_argument(
        "--disk-size-gb",
        type=int,
        default=None,
        help="Root disk size in GB (default: 20 for generated configs)",
    )
    clone_parser.add_argument(
        "--profile",
        help="Profile name (loads ~/.clonebox.d/<name>.yaml, .clonebox.d/<name>.yaml, or built-in templates)",
    )
    clone_parser.add_argument(
        "--replace",
        action="store_true",
        help="If VM already exists, stop+undefine it and recreate (also deletes its storage)",
    )
    clone_parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be created without making any changes",
    )
    clone_parser.set_defaults(func=cmd_clone)
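    # Example (illustrative; path and profile name are placeholders):
    #   clonebox clone ~/projects/myapp --user --run --disk-size-gb 40 --profile dev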

    # Status command - check VM health from workstation
    status_parser = subparsers.add_parser("status", help="Check VM installation status and health")
    status_parser.add_argument(
        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
    )
    status_parser.add_argument(
        "-u",
        "--user",
        action="store_true",
        help="Use user session (qemu:///session)",
    )
    status_parser.add_argument(
        "--health", "-H", action="store_true", help="Run full health check"
    )
    status_parser.add_argument(
        "--verbose", "-v", action="store_true", help="Show detailed diagnostics (QGA, stderr, etc.)"
    )
    status_parser.set_defaults(func=cmd_status)
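    # Example (illustrative): clonebox status . --health --verbose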

    # Diagnose command - detailed diagnostics from workstation
    diagnose_parser = subparsers.add_parser(
        "diagnose", aliases=["diag"], help="Run detailed VM diagnostics"
    )
    diagnose_parser.add_argument(
        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
    )
    diagnose_parser.add_argument(
        "-u",
        "--user",
        action="store_true",
        help="Use user session (qemu:///session)",
    )
    diagnose_parser.add_argument(
        "--verbose", "-v", action="store_true", help="Show more low-level details"
    )
    diagnose_parser.add_argument(
        "--json", action="store_true", help="Print diagnostics as JSON"
    )
    diagnose_parser.set_defaults(func=cmd_diagnose)
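    # Example (illustrative): clonebox diagnose myvm --user --json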

    watch_parser = subparsers.add_parser(
        "watch", help="Watch boot diagnostic output from VM (via QEMU Guest Agent)"
    )
    watch_parser.add_argument(
        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
    )
    watch_parser.add_argument(
        "-u",
        "--user",
        action="store_true",
        help="Use user session (qemu:///session)",
    )
    watch_parser.add_argument(
        "--refresh",
        type=float,
        default=1.0,
        help="Refresh interval in seconds (default: 1.0)",
    )
    watch_parser.add_argument(
        "--timeout",
        type=int,
        default=600,
        help="Max seconds to wait (default: 600)",
    )
    watch_parser.set_defaults(func=cmd_watch)
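    # Example (illustrative): clonebox watch . --refresh 2.0 --timeout 900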

    repair_parser = subparsers.add_parser(
        "repair", help="Trigger boot diagnostic/repair inside VM (via QEMU Guest Agent)"
    )
    repair_parser.add_argument(
        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
    )
    repair_parser.add_argument(
        "-u",
        "--user",
        action="store_true",
        help="Use user session (qemu:///session)",
    )
    repair_parser.add_argument(
        "--timeout",
        type=int,
        default=600,
        help="Max seconds to wait for repair (default: 600)",
    )
    repair_parser.add_argument(
        "--watch",
        action="store_true",
        help="After triggering repair, watch status/log output",
    )
    repair_parser.add_argument(
        "--refresh",
        type=float,
        default=1.0,
        help="Refresh interval for --watch (default: 1.0)",
    )
    repair_parser.set_defaults(func=cmd_repair)
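    # Example (illustrative): clonebox repair myvm --watch --refresh 0.5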

    # Export command - package VM for migration
    export_parser = subparsers.add_parser("export", help="Export VM and data for migration")
    export_parser.add_argument(
        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
    )
    export_parser.add_argument(
        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
    )
    export_parser.add_argument(
        "-o", "--output", help="Output archive filename (default: <vmname>-export.tar.gz)"
    )
    export_parser.add_argument(
        "--include-data", "-d", action="store_true",
        help="Include shared data (browser profiles, configs) in export"
    )
    export_parser.set_defaults(func=cmd_export)
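    # Example (illustrative): clonebox export myvm --include-data -o myvm-export.tar.gz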

    # Import command - restore VM from export
    import_parser = subparsers.add_parser("import", help="Import VM from export archive")
    import_parser.add_argument("archive", help="Path to export archive (.tar.gz)")
    import_parser.add_argument(
        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
    )
    import_parser.add_argument(
        "--replace", action="store_true", help="Replace existing VM if exists"
    )
    import_parser.set_defaults(func=cmd_import)
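    # Example (illustrative): clonebox import myvm-export.tar.gz --replace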

    # Test command - validate VM configuration
    test_parser = subparsers.add_parser("test", help="Test VM configuration and health")
    test_parser.add_argument(
        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
    )
    test_parser.add_argument(
        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
    )
    test_parser.add_argument(
        "--quick", action="store_true", help="Quick test (no deep health checks)"
    )
    test_parser.add_argument(
        "--verbose", "-v", action="store_true", help="Verbose output"
    )
    test_parser.add_argument(
        "--validate", action="store_true", help="Run full validation (mounts, packages, services)"
    )
    test_parser.add_argument(
        "--require-running-apps",
        action="store_true",
        help="Fail validation if expected apps are installed but not currently running",
    )
    test_parser.add_argument(
        "--smoke-test",
        action="store_true",
        help="Run smoke tests (installed ≠ works): headless launch checks for key apps",
    )
    test_parser.set_defaults(func=cmd_test)
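    # Example (illustrative): clonebox test . --validate --smoke-test --verbose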

    args = parser.parse_args()
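    # Each subcommand registered its handler via set_defaults(func=...); with
    # no subcommand at all, drop into the interactive wizard instead.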
if hasattr(args, "func"):
|
|
2919
|
+
try:
|
|
2920
|
+
args.func(args)
|
|
2921
|
+
except KeyboardInterrupt:
|
|
2922
|
+
console.print("\n[yellow]Interrupted.[/]")
|
|
2923
|
+
sys.exit(1)
|
|
2924
|
+
except Exception as e:
|
|
2925
|
+
console.print(f"[red]Error: {e}[/]")
|
|
2926
|
+
sys.exit(1)
|
|
2927
|
+
else:
|
|
2928
|
+
interactive_mode()
|
|
2929
|
+
|
|
2930
|
+
|
|
2931
|
+
if __name__ == "__main__":
|
|
2932
|
+
main()
|