clonebox 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl
This diff shows the content of publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the two versions.
- clonebox/cli.py +629 -215
- clonebox/cloner.py +21 -24
- clonebox/container.py +190 -0
- clonebox/models.py +197 -0
- {clonebox-0.1.14.dist-info → clonebox-0.1.16.dist-info}/METADATA +114 -14
- clonebox-0.1.16.dist-info/RECORD +14 -0
- clonebox-0.1.14.dist-info/RECORD +0 -12
- {clonebox-0.1.14.dist-info → clonebox-0.1.16.dist-info}/WHEEL +0 -0
- {clonebox-0.1.14.dist-info → clonebox-0.1.16.dist-info}/entry_points.txt +0 -0
- {clonebox-0.1.14.dist-info → clonebox-0.1.16.dist-info}/licenses/LICENSE +0 -0
- {clonebox-0.1.14.dist-info → clonebox-0.1.16.dist-info}/top_level.txt +0 -0
clonebox/cli.py
CHANGED
@@ -58,6 +58,359 @@ def print_banner():
     console.print(f" Version {__version__}\n", style="dim")


+def _resolve_vm_name_and_config_file(name: Optional[str]) -> tuple[str, Optional[Path]]:
+    config_file: Optional[Path] = None
+
+    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
+        target_path = Path(name).expanduser().resolve()
+        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            return config["vm"]["name"], config_file
+        raise FileNotFoundError(f"Config not found: {config_file}")
+
+    if not name:
+        config_file = Path.cwd() / ".clonebox.yaml"
+        if config_file.exists():
+            config = load_clonebox_config(config_file)
+            return config["vm"]["name"], config_file
+        raise FileNotFoundError("No VM name specified and no .clonebox.yaml found")
+
+    return name, None
+
+
+def _qga_ping(vm_name: str, conn_uri: str) -> bool:
+    import subprocess
+
+    try:
+        result = subprocess.run(
+            [
+                "virsh",
+                "--connect",
+                conn_uri,
+                "qemu-agent-command",
+                vm_name,
+                json.dumps({"execute": "guest-ping"}),
+            ],
+            capture_output=True,
+            text=True,
+            timeout=5,
+        )
+        return result.returncode == 0
+    except Exception:
+        return False
+
+
+def _qga_exec(vm_name: str, conn_uri: str, command: str, timeout: int = 10) -> Optional[str]:
+    import subprocess
+
+    try:
+        payload = {
+            "execute": "guest-exec",
+            "arguments": {
+                "path": "/bin/sh",
+                "arg": ["-c", command],
+                "capture-output": True,
+            },
+        }
+        exec_result = subprocess.run(
+            [
+                "virsh",
+                "--connect",
+                conn_uri,
+                "qemu-agent-command",
+                vm_name,
+                json.dumps(payload),
+            ],
+            capture_output=True,
+            text=True,
+            timeout=timeout,
+        )
+        if exec_result.returncode != 0:
+            return None
+
+        resp = json.loads(exec_result.stdout)
+        pid = resp.get("return", {}).get("pid")
+        if not pid:
+            return None
+
+        import base64
+        import time
+
+        deadline = time.time() + timeout
+        while time.time() < deadline:
+            status_payload = {"execute": "guest-exec-status", "arguments": {"pid": pid}}
+            status_result = subprocess.run(
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    json.dumps(status_payload),
+                ],
+                capture_output=True,
+                text=True,
+                timeout=5,
+            )
+            if status_result.returncode != 0:
+                return None
+
+            status_resp = json.loads(status_result.stdout)
+            ret = status_resp.get("return", {})
+            if not ret.get("exited", False):
+                time.sleep(0.3)
+                continue
+
+            out_data = ret.get("out-data")
+            if out_data:
+                return base64.b64decode(out_data).decode().strip()
+            return ""
+
+        return None
+    except Exception:
+        return None
+
+
+def run_vm_diagnostics(
+    vm_name: str,
+    conn_uri: str,
+    config_file: Optional[Path],
+    *,
+    verbose: bool = False,
+    json_output: bool = False,
+) -> dict:
+    import subprocess
+
+    result: dict = {
+        "vm": {"name": vm_name, "conn_uri": conn_uri},
+        "state": {},
+        "network": {},
+        "qga": {},
+        "cloud_init": {},
+        "mounts": {},
+        "health": {},
+    }
+
+    console.print(f"[bold cyan]🧪 Diagnostics: {vm_name}[/]\n")
+
+    try:
+        domstate = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domstate", vm_name],
+            capture_output=True,
+            text=True,
+            timeout=5,
+        )
+        result["state"] = {
+            "returncode": domstate.returncode,
+            "stdout": domstate.stdout.strip(),
+            "stderr": domstate.stderr.strip(),
+        }
+        if domstate.returncode == 0 and domstate.stdout.strip():
+            console.print(f"[green]✅ VM State: {domstate.stdout.strip()}[/]")
+        else:
+            console.print("[red]❌ VM State: unable to read[/]")
+            if verbose and domstate.stderr.strip():
+                console.print(f"[dim]{domstate.stderr.strip()}[/]")
+    except subprocess.TimeoutExpired:
+        result["state"] = {"error": "timeout"}
+        console.print("[red]❌ VM State: timeout[/]")
+        if json_output:
+            console.print_json(json.dumps(result))
+        return result
+
+    console.print("\n[bold]🔍 Checking VM network...[/]")
+    try:
+        domifaddr = subprocess.run(
+            ["virsh", "--connect", conn_uri, "domifaddr", vm_name],
+            capture_output=True,
+            text=True,
+            timeout=10,
+        )
+        result["network"] = {
+            "returncode": domifaddr.returncode,
+            "stdout": domifaddr.stdout.strip(),
+            "stderr": domifaddr.stderr.strip(),
+        }
+        if domifaddr.stdout.strip():
+            console.print(f"[dim]{domifaddr.stdout.strip()}[/]")
+        else:
+            console.print("[yellow]⚠️ No interface address detected yet[/]")
+            if verbose and domifaddr.stderr.strip():
+                console.print(f"[dim]{domifaddr.stderr.strip()}[/]")
+    except Exception as e:
+        result["network"] = {"error": str(e)}
+        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")
+
+    guest_agent_ready = _qga_ping(vm_name, conn_uri)
+    result["qga"]["ready"] = guest_agent_ready
+    if verbose:
+        console.print("\n[bold]🤖 QEMU Guest Agent...[/]")
+        console.print(f"{'[green]✅' if guest_agent_ready else '[red]❌'} QGA connected")
+
+    if not guest_agent_ready:
+        try:
+            dumpxml = subprocess.run(
+                ["virsh", "--connect", conn_uri, "dumpxml", vm_name],
+                capture_output=True,
+                text=True,
+                timeout=10,
+            )
+            has_qga_channel = False
+            if dumpxml.returncode == 0:
+                has_qga_channel = "org.qemu.guest_agent.0" in dumpxml.stdout
+            result["qga"]["dumpxml_returncode"] = dumpxml.returncode
+            result["qga"]["has_channel"] = has_qga_channel
+            if dumpxml.stderr.strip():
+                result["qga"]["dumpxml_stderr"] = dumpxml.stderr.strip()
+
+            console.print(
+                f"[dim]Guest agent channel in VM XML: {'present' if has_qga_channel else 'missing'}[/]"
+            )
+        except Exception as e:
+            result["qga"]["dumpxml_error"] = str(e)
+
+        try:
+            ping_attempt = subprocess.run(
+                [
+                    "virsh",
+                    "--connect",
+                    conn_uri,
+                    "qemu-agent-command",
+                    vm_name,
+                    json.dumps({"execute": "guest-ping"}),
+                ],
+                capture_output=True,
+                text=True,
+                timeout=10,
+            )
+            result["qga"]["ping_returncode"] = ping_attempt.returncode
+            result["qga"]["ping_stdout"] = ping_attempt.stdout.strip()
+            result["qga"]["ping_stderr"] = ping_attempt.stderr.strip()
+            if ping_attempt.stderr.strip():
+                console.print(f"[dim]qemu-agent-command stderr: {ping_attempt.stderr.strip()}[/]")
+        except Exception as e:
+            result["qga"]["ping_error"] = str(e)
+
+        console.print("[dim]If channel is present, the agent inside VM may not be running yet.[/]")
+        console.print("[dim]Inside VM try: sudo systemctl status qemu-guest-agent && sudo systemctl restart qemu-guest-agent[/]")
+
+    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
+    cloud_init_complete = False
+    if not guest_agent_ready:
+        result["cloud_init"] = {"status": "unknown", "reason": "qga_not_ready"}
+        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU guest agent not connected yet)[/]")
+    else:
+        ready_msg = _qga_exec(vm_name, conn_uri, "cat /var/log/clonebox-ready 2>/dev/null || true", timeout=10)
+        result["cloud_init"]["clonebox_ready_file"] = ready_msg
+        if ready_msg and "CloneBox VM ready" in ready_msg:
+            cloud_init_complete = True
+            result["cloud_init"]["status"] = "complete"
+            console.print("[green]✅ Cloud-init: Complete[/]")
+        else:
+            ci_status = _qga_exec(vm_name, conn_uri, "cloud-init status 2>/dev/null || true", timeout=10)
+            result["cloud_init"]["cloud_init_status"] = ci_status
+            result["cloud_init"]["status"] = "running"
+            console.print("[yellow]⏳ Cloud-init: Still running[/]")
+            if verbose and ci_status:
+                console.print(f"[dim]{ci_status}[/]")
+
+    console.print("\n[bold]💾 Checking mount status...[/]")
+    if not cloud_init_complete:
+        console.print("[dim]Mounts may not be ready until cloud-init completes.[/]")
+
+    mounts_detail: list[dict] = []
+    result["mounts"]["details"] = mounts_detail
+    if not guest_agent_ready:
+        console.print("[yellow]⏳ QEMU guest agent not connected yet - cannot verify mounts.[/]")
+        result["mounts"]["status"] = "unknown"
+    else:
+        if not config_file:
+            config_file = Path.cwd() / ".clonebox.yaml"
+
+        if not config_file.exists():
+            console.print("[dim]No .clonebox.yaml found - cannot check mounts[/]")
+            result["mounts"]["status"] = "no_config"
+        else:
+            config = load_clonebox_config(config_file)
+            all_paths = config.get("paths", {}).copy()
+            all_paths.update(config.get("app_data_paths", {}))
+            result["mounts"]["expected"] = list(all_paths.values())
+            mount_output = _qga_exec(vm_name, conn_uri, "mount | grep 9p || true", timeout=10) or ""
+            mounted_paths = [line.split()[2] for line in mount_output.split("\n") if line.strip()]
+            result["mounts"]["mounted_paths"] = mounted_paths
+
+            mount_table = Table(title="Mount Points", border_style="cyan", show_header=True)
+            mount_table.add_column("Guest Path", style="bold")
+            mount_table.add_column("Mounted", justify="center")
+            mount_table.add_column("Accessible", justify="center")
+            mount_table.add_column("Files", justify="right")
+
+            working_mounts = 0
+            total_mounts = 0
+            for _, guest_path in all_paths.items():
+                total_mounts += 1
+                is_mounted = any(guest_path == mp or guest_path in mp for mp in mounted_paths)
+                accessible = False
+                file_count: str = "?"
+
+                if is_mounted:
+                    test_out = _qga_exec(vm_name, conn_uri, f"test -d {guest_path} && echo yes || echo no", timeout=5)
+                    accessible = test_out == "yes"
+                    if accessible:
+                        count_str = _qga_exec(vm_name, conn_uri, f"ls -A {guest_path} 2>/dev/null | wc -l", timeout=5)
+                        if count_str and count_str.strip().isdigit():
+                            file_count = count_str.strip()
+
+                if is_mounted and accessible:
+                    working_mounts += 1
+
+                mount_table.add_row(
+                    guest_path,
+                    "[green]✅[/]" if is_mounted else "[red]❌[/]",
+                    "[green]✅[/]" if accessible else ("[red]❌[/]" if is_mounted else "[dim]N/A[/]"),
+                    file_count,
+                )
+                mounts_detail.append(
+                    {
+                        "guest_path": guest_path,
+                        "mounted": is_mounted,
+                        "accessible": accessible,
+                        "files": file_count,
+                    }
+                )
+
+            result["mounts"]["working"] = working_mounts
+            result["mounts"]["total"] = total_mounts
+            result["mounts"]["status"] = "ok" if working_mounts == total_mounts else "partial"
+
+            console.print(mount_table)
+            console.print(f"[dim]{working_mounts}/{total_mounts} mounts working[/]")
+
+    console.print("\n[bold]🏥 Health Check Status...[/]")
+    if not guest_agent_ready:
+        result["health"]["status"] = "unknown"
+        console.print("[dim]Health status: Not available yet (QEMU guest agent not ready)[/]")
+    else:
+        health_status = _qga_exec(vm_name, conn_uri, "cat /var/log/clonebox-health-status 2>/dev/null || true", timeout=10)
+        result["health"]["raw"] = health_status
+        if health_status and "HEALTH_STATUS=OK" in health_status:
+            result["health"]["status"] = "ok"
+            console.print("[green]✅ Health: All checks passed[/]")
+        elif health_status and "HEALTH_STATUS=FAILED" in health_status:
+            result["health"]["status"] = "failed"
+            console.print("[red]❌ Health: Some checks failed[/]")
+        else:
+            result["health"]["status"] = "not_run"
+            console.print("[yellow]⏳ Health check not yet run[/]")
+        if verbose and health_status:
+            console.print(f"[dim]{health_status}[/]")
+
+    if json_output:
+        console.print_json(json.dumps(result))
+    return result
+
+
 def interactive_mode():
     """Run the interactive VM creation wizard."""
     print_banner()
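Note: the new `_qga_exec` helper wraps the QEMU guest agent's two-step guest-exec / guest-exec-status handshake. A minimal standalone sketch of the same handshake, assuming virsh is installed and a running guest named "dev-vm" on qemu:///session (both placeholders):

    import base64
    import json
    import subprocess
    import time

    def qga_run(vm, uri, command, timeout=10.0):
        # Step 1: guest-exec launches the command inside the guest, returning a PID.
        start = subprocess.run(
            ["virsh", "--connect", uri, "qemu-agent-command", vm,
             json.dumps({"execute": "guest-exec", "arguments": {
                 "path": "/bin/sh", "arg": ["-c", command], "capture-output": True}})],
            capture_output=True, text=True, timeout=timeout,
        )
        if start.returncode != 0:
            return None
        pid = json.loads(start.stdout)["return"]["pid"]
        # Step 2: poll guest-exec-status until the process reports exited.
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = subprocess.run(
                ["virsh", "--connect", uri, "qemu-agent-command", vm,
                 json.dumps({"execute": "guest-exec-status", "arguments": {"pid": pid}})],
                capture_output=True, text=True, timeout=5,
            )
            if status.returncode != 0:
                return None
            ret = json.loads(status.stdout).get("return", {})
            if ret.get("exited"):
                # stdout comes back base64-encoded in "out-data".
                return base64.b64decode(ret.get("out-data", "")).decode()
            time.sleep(0.3)
        return None

    print(qga_run("dev-vm", "qemu:///session", "uname -a"))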
@@ -579,239 +932,177 @@ def cmd_list(args):
     console.print(table)


+def cmd_container_up(args):
+    """Start a container sandbox."""
+    try:
+        from clonebox.container import ContainerCloner
+        from clonebox.models import ContainerConfig
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    mounts: dict[str, str] = {}
+    for m in getattr(args, "mount", []) or []:
+        if ":" not in m:
+            raise ValueError(f"Invalid mount: {m} (expected HOST:CONTAINER)")
+        host, container_path = m.split(":", 1)
+        mounts[host] = container_path
+
+    cfg_kwargs: dict = {
+        "engine": getattr(args, "engine", "auto"),
+        "image": getattr(args, "image", "ubuntu:22.04"),
+        "workspace": Path(getattr(args, "path", ".")),
+        "extra_mounts": mounts,
+        "env_from_dotenv": not getattr(args, "no_dotenv", False),
+        "packages": getattr(args, "package", []) or [],
+        "ports": getattr(args, "port", []) or [],
+    }
+    if getattr(args, "name", None):
+        cfg_kwargs["name"] = args.name
+
+    cfg = ContainerConfig(**cfg_kwargs)
+
+    cloner = ContainerCloner(engine=cfg.engine)
+    cloner.up(cfg, detach=getattr(args, "detach", False))
+
+
+def cmd_container_ps(args):
+    """List containers."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    items = cloner.ps(all=getattr(args, "all", False))
+
+    if getattr(args, "json", False):
+        print(json.dumps(items, indent=2))
+        return
+
+    if not items:
+        console.print("[dim]No containers found.[/]")
+        return
+
+    table = Table(title="Containers", border_style="cyan")
+    table.add_column("Name", style="bold")
+    table.add_column("Image")
+    table.add_column("Status")
+    table.add_column("Ports")
+
+    for c in items:
+        table.add_row(
+            str(c.get("name", "")),
+            str(c.get("image", "")),
+            str(c.get("status", "")),
+            str(c.get("ports", "")),
+        )
+
+    console.print(table)
+
+
+def cmd_container_stop(args):
+    """Stop a container."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    cloner.stop(args.name)
+
+
+def cmd_container_rm(args):
+    """Remove a container."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    cloner.rm(args.name, force=getattr(args, "force", False))
+
+
+def cmd_container_down(args):
+    """Stop and remove a container."""
+    try:
+        from clonebox.container import ContainerCloner
+    except ModuleNotFoundError as e:
+        raise ModuleNotFoundError(
+            "Container features require extra dependencies (e.g. pydantic). Install them to use 'clonebox container'."
+        ) from e
+
+    cloner = ContainerCloner(engine=getattr(args, "engine", "auto"))
+    cloner.stop(args.name)
+    cloner.rm(args.name, force=True)
+
+
+def cmd_diagnose(args):
+    """Run detailed VM diagnostics (standalone)."""
+    name = args.name
+    user_session = getattr(args, "user", False)
+    conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
+    try:
+        vm_name, config_file = _resolve_vm_name_and_config_file(name)
+    except FileNotFoundError as e:
+        console.print(f"[red]❌ {e}[/]")
+        return
+
+    run_vm_diagnostics(
+        vm_name,
+        conn_uri,
+        config_file,
+        verbose=getattr(args, "verbose", False),
+        json_output=getattr(args, "json", False),
+    )
+
+
 def cmd_status(args):
     """Check VM installation status and health from workstation."""
     import subprocess
-
+
     name = args.name
     user_session = getattr(args, "user", False)
     conn_uri = "qemu:///session" if user_session else "qemu:///system"
-
-    # If name is a path, load config to get VM name
-    if name and (name.startswith(".") or name.startswith("/") or name.startswith("~")):
-        target_path = Path(name).expanduser().resolve()
-        config_file = target_path / ".clonebox.yaml" if target_path.is_dir() else target_path
-        if config_file.exists():
-            config = load_clonebox_config(config_file)
-            name = config["vm"]["name"]
-        else:
-            console.print(f"[red]❌ Config not found: {config_file}[/]")
-            return
-
-    if not name:
-        # Try current directory
-        config_file = Path.cwd() / ".clonebox.yaml"
-        if config_file.exists():
-            config = load_clonebox_config(config_file)
-            name = config["vm"]["name"]
-        else:
-            console.print("[red]❌ No VM name specified and no .clonebox.yaml found[/]")
-            return
-
-    console.print(f"[bold cyan]📊 Checking VM status: {name}[/]\n")
-
-    # Check VM state
+
     try:
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "domstate", name],
-            capture_output=True, text=True, timeout=10
-        )
-        vm_state = result.stdout.strip()
-
-        if "running" in vm_state.lower():
-            console.print(f"[green]✅ VM State: {vm_state}[/]")
-        elif "shut off" in vm_state.lower():
-            console.print(f"[yellow]⏸️ VM State: {vm_state}[/]")
-            console.print("[dim]Start with: clonebox start .[/]")
-            return
-        else:
-            console.print(f"[dim]VM State: {vm_state}[/]")
-    except subprocess.TimeoutExpired:
-        console.print("[red]❌ Timeout checking VM state[/]")
-        return
-    except Exception as e:
-        console.print(f"[red]❌ Error: {e}[/]")
+        vm_name, config_file = _resolve_vm_name_and_config_file(name)
+    except FileNotFoundError as e:
+        console.print(f"[red]❌ {e}[/]")
         return
-
-
-
-    try:
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "domifaddr", name],
-            capture_output=True, text=True, timeout=10
-        )
-        if result.stdout.strip():
-            console.print(f"[dim]{result.stdout.strip()}[/]")
-            # Extract IP
-            for line in result.stdout.split('\n'):
-                if 'ipv4' in line.lower():
-                    parts = line.split()
-                    for p in parts:
-                        if '/' in p and '.' in p:
-                            ip = p.split('/')[0]
-                            console.print(f"[green]IP Address: {ip}[/]")
-                            break
-        else:
-            console.print("[yellow]⚠️ No IP address yet (VM may still be booting)[/]")
-    except Exception as e:
-        console.print(f"[yellow]⚠️ Cannot get IP: {e}[/]")
-
-    # Check cloud-init status via console
-    console.print("\n[bold]☁️ Checking cloud-init status...[/]")
-    try:
-        # Use virsh console to check - this is tricky, so we check for the ready file
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-ready"],"capture-output":true}}'],
-            capture_output=True, text=True, timeout=10
-        )
-        if "CloneBox VM ready" in result.stdout or result.returncode == 0:
-            console.print("[green]✅ Cloud-init: Complete[/]")
-        else:
-            console.print("[yellow]⏳ Cloud-init: Still running (packages installing)[/]")
-    except Exception:
-        console.print("[yellow]⏳ Cloud-init status: Unknown (QEMU agent may not be ready)[/]")
-
-    # Check mount status
-    console.print("\n[bold]💾 Checking mount status...[/]")
-    try:
-        # Load config to get expected mounts
-        config_file = Path.cwd() / ".clonebox.yaml"
-        if config_file.exists():
-            config = load_clonebox_config(config_file)
-            all_paths = config.get("paths", {}).copy()
-            all_paths.update(config.get("app_data_paths", {}))
-
-            if all_paths:
-                # Check which mounts are active
-                result = subprocess.run(
-                    ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                     '{"execute":"guest-exec","arguments":{"path":"/bin/sh","arg":["-c","mount | grep 9p"],"capture-output":true}}'],
-                    capture_output=True, text=True, timeout=10
-                )
-
-                mount_table = Table(title="Mount Points", border_style="cyan", show_header=True)
-                mount_table.add_column("Guest Path", style="bold")
-                mount_table.add_column("Status", justify="center")
-                mount_table.add_column("Files", justify="right")
-
-                mounted_paths = []
-                if result.returncode == 0 and "return" in result.stdout:
-                    # Parse guest-exec response for mount output
-                    import json
-                    try:
-                        resp = json.loads(result.stdout)
-                        if "return" in resp and "pid" in resp["return"]:
-                            # Get the output from guest-exec-status
-                            pid = resp["return"]["pid"]
-                            status_result = subprocess.run(
-                                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                                 f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
-                                capture_output=True, text=True, timeout=5
-                            )
-                            if status_result.returncode == 0:
-                                status_resp = json.loads(status_result.stdout)
-                                if "return" in status_resp and "out-data" in status_resp["return"]:
-                                    import base64
-                                    mount_output = base64.b64decode(status_resp["return"]["out-data"]).decode()
-                                    mounted_paths = [line.split()[2] for line in mount_output.split('\n') if line.strip()]
-                    except:
-                        pass
-
-                # Check each expected mount
-                working_mounts = 0
-                total_mounts = 0
-                for host_path, guest_path in all_paths.items():
-                    total_mounts += 1
-                    is_mounted = any(guest_path in mp for mp in mounted_paths)
-
-                    # Try to get file count
-                    file_count = "?"
-                    if is_mounted:
-                        try:
-                            count_result = subprocess.run(
-                                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                                 f'{{"execute":"guest-exec","arguments":{{"path":"/bin/sh","arg":["-c","ls -A {guest_path} 2>/dev/null | wc -l"],"capture-output":true}}}}'],
-                                capture_output=True, text=True, timeout=5
-                            )
-                            if count_result.returncode == 0:
-                                resp = json.loads(count_result.stdout)
-                                if "return" in resp and "pid" in resp["return"]:
-                                    pid = resp["return"]["pid"]
-                                    import time
-                                    time.sleep(0.5)
-                                    status_result = subprocess.run(
-                                        ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-                                         f'{{"execute":"guest-exec-status","arguments":{{"pid":{pid}}}}}'],
-                                        capture_output=True, text=True, timeout=5
-                                    )
-                                    if status_result.returncode == 0:
-                                        status_resp = json.loads(status_result.stdout)
-                                        if "return" in status_resp and "out-data" in status_resp["return"]:
-                                            file_count = base64.b64decode(status_resp["return"]["out-data"]).decode().strip()
-                        except:
-                            pass
-
-                    if is_mounted:
-                        status = "[green]✅ Mounted[/]"
-                        working_mounts += 1
-                    else:
-                        status = "[red]❌ Not mounted[/]"
-
-                    mount_table.add_row(guest_path, status, str(file_count))
-
-                console.print(mount_table)
-                console.print(f"[dim]{working_mounts}/{total_mounts} mounts active[/]")
-
-                if working_mounts < total_mounts:
-                    console.print("[yellow]⚠️ Some mounts are missing. Try remounting in VM:[/]")
-                    console.print("[dim] sudo mount -a[/]")
-                    console.print("[dim]Or rebuild VM with: clonebox clone . --user --run --replace[/]")
-            else:
-                console.print("[dim]No mount points configured[/]")
-        else:
-            console.print("[dim]No .clonebox.yaml found - cannot check mounts[/]")
-    except Exception as e:
-        console.print(f"[yellow]⚠️ Cannot check mounts: {e}[/]")
-        console.print("[dim]QEMU guest agent may not be ready yet[/]")
-
-    # Check health status if available
-    console.print("\n[bold]🏥 Health Check Status...[/]")
-    try:
-        result = subprocess.run(
-            ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
-             '{"execute":"guest-exec","arguments":{"path":"/bin/cat","arg":["/var/log/clonebox-health-status"],"capture-output":true}}'],
-            capture_output=True, text=True, timeout=10
-        )
-        if "HEALTH_STATUS=OK" in result.stdout:
-            console.print("[green]✅ Health: All checks passed[/]")
-        elif "HEALTH_STATUS=FAILED" in result.stdout:
-            console.print("[red]❌ Health: Some checks failed[/]")
-        else:
-            console.print("[yellow]⏳ Health check not yet run[/]")
-    except Exception:
-        console.print("[dim]Health status: Not available yet[/]")
-
+
+    run_vm_diagnostics(vm_name, conn_uri, config_file, verbose=False, json_output=False)
+
     # Show useful commands
     console.print("\n[bold]📋 Useful commands:[/]")
-    console.print(f" [cyan]virt-viewer --connect {conn_uri} {name}[/] # Open GUI")
-    console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/] # Console access")
+    console.print(f" [cyan]virt-viewer --connect {conn_uri} {vm_name}[/] # Open GUI")
+    console.print(f" [cyan]virsh --connect {conn_uri} console {vm_name}[/] # Console access")
     console.print(" [dim]Inside VM:[/]")
     console.print(" [cyan]cat /var/log/clonebox-health.log[/] # Full health report")
     console.print(" [cyan]sudo cloud-init status[/] # Cloud-init status")
     console.print(" [cyan]clonebox-health[/] # Re-run health check")
+    console.print(" [dim]On host:[/]")
+    console.print(" [cyan]clonebox test . --user --validate[/] # Full validation (mounts/packages/services)")

     # Run full health check if requested
     if getattr(args, "health", False):
         console.print("\n[bold]🔄 Running full health check...[/]")
         try:
             result = subprocess.run(
-                ["virsh", "--connect", conn_uri, "qemu-agent-command", name,
+                ["virsh", "--connect", conn_uri, "qemu-agent-command", vm_name,
                  '{"execute":"guest-exec","arguments":{"path":"/usr/local/bin/clonebox-health","capture-output":true}}'],
                 capture_output=True, text=True, timeout=60
             )
             console.print("[green]Health check triggered. View results with:[/]")
-            console.print(f" [cyan]virsh --connect {conn_uri} console {name}[/]")
+            console.print(f" [cyan]virsh --connect {conn_uri} console {vm_name}[/]")
             console.print(" Then run: [cyan]cat /var/log/clonebox-health.log[/]")
         except Exception as e:
             console.print(f"[yellow]⚠️ Could not trigger health check: {e}[/]")
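Note: the container handlers above are thin wrappers over ContainerConfig and ContainerCloner. A hedged sketch of the equivalent programmatic call, mirroring the keyword arguments cmd_container_up assembles (assumes the optional container dependencies such as pydantic are installed; the mount path is illustrative):

    from pathlib import Path

    from clonebox.container import ContainerCloner
    from clonebox.models import ContainerConfig

    cfg = ContainerConfig(
        engine="auto",                            # or "podman" / "docker"
        image="ubuntu:22.04",
        workspace=Path("."),
        extra_mounts={"/home/me/data": "/data"},  # --mount HOST:CONTAINER (illustrative)
        env_from_dotenv=True,                     # disabled by --no-dotenv
        packages=["git"],                         # --package (repeatable)
        ports=["8080:80"],                        # --port (repeatable)
    )
    ContainerCloner(engine=cfg.engine).up(cfg, detach=True)  # --detach

The CLI equivalent would be: clonebox container up . --image ubuntu:22.04 --mount /home/me/data:/data --port 8080:80 --package git --detach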
@@ -1347,11 +1638,24 @@ def cmd_test(args):

     console.print()

-    #
-
-
-
-
+    # Run full validation if requested
+    if validate_all and state == "running":
+        validator = VMValidator(config, vm_name, conn_uri, console)
+        results = validator.validate_all()
+
+        # Exit with error code if validations failed
+        if results["overall"] == "partial":
+            return 1
+    else:
+        # Summary
+        console.print("[bold]Test Summary[/]")
+        console.print("VM configuration is valid and VM is accessible.")
+        console.print("\n[dim]For full validation including packages, services, and mounts:[/]")
+        console.print("[dim] clonebox test . --user --validate[/]")
+        console.print("\n[dim]For detailed health report, run in VM:[/]")
+        console.print("[dim] cat /var/log/clonebox-health.log[/]")
+
+    return 0


 CLONEBOX_CONFIG_FILE = ".clonebox.yaml"
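Note: with --validate, cmd_test now returns 1 on partial validation, so the result can gate CI. A small sketch, assuming `clonebox` is on PATH and that main() propagates the handler's return value as the process exit status:

    import subprocess
    import sys

    # Non-zero exit means at least one mount/package/service check failed.
    proc = subprocess.run(["clonebox", "test", ".", "--user", "--validate"])
    sys.exit(proc.returncode)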
@@ -1721,12 +2025,16 @@ def create_vm_from_config(
 def cmd_clone(args):
     """Generate clone config from path and optionally create VM."""
     target_path = Path(args.path).resolve()
+    dry_run = getattr(args, "dry_run", False)

     if not target_path.exists():
         console.print(f"[red]❌ Path does not exist: {target_path}[/]")
         return

-    console.print(f"[bold cyan]📦 Generating clone config for: {target_path}[/]\n")
+    if dry_run:
+        console.print(f"[bold cyan]🔍 DRY RUN - Analyzing: {target_path}[/]\n")
+    else:
+        console.print(f"[bold cyan]📦 Generating clone config for: {target_path}[/]\n")

     # Detect system state
     with Progress(
@@ -1751,6 +2059,25 @@ def cmd_clone(args):
         base_image=getattr(args, "base_image", None),
     )

+    # Dry run - show what would be created and exit
+    if dry_run:
+        config = yaml.safe_load(yaml_content)
+        console.print(Panel(
+            f"[bold]VM Name:[/] {config['vm']['name']}\n"
+            f"[bold]RAM:[/] {config['vm'].get('ram_mb', 4096)} MB\n"
+            f"[bold]vCPUs:[/] {config['vm'].get('vcpus', 4)}\n"
+            f"[bold]Network:[/] {config['vm'].get('network_mode', 'auto')}\n"
+            f"[bold]Paths:[/] {len(config.get('paths', {}))} mounts\n"
+            f"[bold]Packages:[/] {len(config.get('packages', []))} packages\n"
+            f"[bold]Services:[/] {len(config.get('services', []))} services",
+            title="[bold cyan]Would create VM[/]",
+            border_style="cyan",
+        ))
+        console.print("\n[dim]Config preview:[/]")
+        console.print(Panel(yaml_content, title="[bold].clonebox.yaml[/]", border_style="dim"))
+        console.print("\n[yellow]ℹ️ Dry run complete. No changes made.[/]")
+        return
+
     # Save config file
     config_file = (
         target_path / CLONEBOX_CONFIG_FILE
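Note: the dry-run panel reads a handful of keys from the generated YAML, falling back to fixed defaults. A minimal sketch of the same summary over an existing config file (field names and defaults taken from the panel above; the path assumes you are in the project directory):

    from pathlib import Path

    import yaml

    config = yaml.safe_load(Path(".clonebox.yaml").read_text())

    # The same fields the dry-run panel reports, with the same defaults.
    print("VM Name: ", config["vm"]["name"])
    print("RAM:     ", config["vm"].get("ram_mb", 4096), "MB")
    print("vCPUs:   ", config["vm"].get("vcpus", 4))
    print("Network: ", config["vm"].get("network_mode", "auto"))
    print("Paths:   ", len(config.get("paths", {})), "mounts")
    print("Packages:", len(config.get("packages", [])))
    print("Services:", len(config.get("services", [])))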
@@ -2014,6 +2341,64 @@ def main():
     )
     list_parser.set_defaults(func=cmd_list)

+    # Container command
+    container_parser = subparsers.add_parser("container", help="Manage container sandboxes")
+    container_parser.add_argument(
+        "--engine",
+        choices=["auto", "podman", "docker"],
+        default="auto",
+        help="Container engine: auto (default), podman, docker",
+    )
+    container_sub = container_parser.add_subparsers(dest="container_command", help="Container commands")
+
+    container_up = container_sub.add_parser("up", help="Start container")
+    container_up.add_argument("path", nargs="?", default=".", help="Workspace path")
+    container_up.add_argument("--name", help="Container name")
+    container_up.add_argument("--image", default="ubuntu:22.04", help="Container image")
+    container_up.add_argument("--detach", action="store_true", help="Run container in background")
+    container_up.add_argument(
+        "--mount",
+        action="append",
+        default=[],
+        help="Extra mount HOST:CONTAINER (repeatable)",
+    )
+    container_up.add_argument(
+        "--port",
+        action="append",
+        default=[],
+        help="Port mapping (e.g. 8080:80) (repeatable)",
+    )
+    container_up.add_argument(
+        "--package",
+        action="append",
+        default=[],
+        help="APT package to install in image (repeatable)",
+    )
+    container_up.add_argument(
+        "--no-dotenv",
+        action="store_true",
+        help="Do not load env vars from workspace .env",
+    )
+    container_up.set_defaults(func=cmd_container_up)
+
+    container_ps = container_sub.add_parser("ps", aliases=["ls"], help="List containers")
+    container_ps.add_argument("-a", "--all", action="store_true", help="Show all containers")
+    container_ps.add_argument("--json", action="store_true", help="Output JSON")
+    container_ps.set_defaults(func=cmd_container_ps)
+
+    container_stop = container_sub.add_parser("stop", help="Stop container")
+    container_stop.add_argument("name", help="Container name")
+    container_stop.set_defaults(func=cmd_container_stop)
+
+    container_rm = container_sub.add_parser("rm", help="Remove container")
+    container_rm.add_argument("name", help="Container name")
+    container_rm.add_argument("-f", "--force", action="store_true", help="Force remove")
+    container_rm.set_defaults(func=cmd_container_rm)
+
+    container_down = container_sub.add_parser("down", help="Stop and remove container")
+    container_down.add_argument("name", help="Container name")
+    container_down.set_defaults(func=cmd_container_down)
+
     # Detect command
     detect_parser = subparsers.add_parser("detect", help="Detect system state")
     detect_parser.add_argument("--json", action="store_true", help="Output as JSON")
@@ -2058,6 +2443,11 @@ def main():
         action="store_true",
         help="If VM already exists, stop+undefine it and recreate (also deletes its storage)",
     )
+    clone_parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        help="Show what would be created without making any changes",
+    )
     clone_parser.set_defaults(func=cmd_clone)

     # Status command - check VM health from workstation
@@ -2076,6 +2466,27 @@ def main():
     )
     status_parser.set_defaults(func=cmd_status)

+    # Diagnose command - detailed diagnostics from workstation
+    diagnose_parser = subparsers.add_parser(
+        "diagnose", aliases=["diag"], help="Run detailed VM diagnostics"
+    )
+    diagnose_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    diagnose_parser.add_argument(
+        "-u",
+        "--user",
+        action="store_true",
+        help="Use user session (qemu:///session)",
+    )
+    diagnose_parser.add_argument(
+        "--verbose", "-v", action="store_true", help="Show more low-level details"
+    )
+    diagnose_parser.add_argument(
+        "--json", action="store_true", help="Print diagnostics as JSON"
+    )
+    diagnose_parser.set_defaults(func=cmd_diagnose)
+
     # Export command - package VM for migration
     export_parser = subparsers.add_parser("export", help="Export VM and data for migration")
     export_parser.add_argument(
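Note: `diagnose --json` prints the full diagnostics dict built by run_vm_diagnostics, so scripts can consume it. A sketch using the keys defined above (assumes `clonebox` is on PATH; extracting the JSON from the last top-level opening brace is a simplifying assumption, since the human-readable report precedes it on stdout):

    import json
    import subprocess

    out = subprocess.run(
        ["clonebox", "diagnose", ".", "--user", "--json"],
        capture_output=True, text=True,
    ).stdout

    # Parse the trailing JSON document (simplifying assumption, see above).
    info = json.loads(out[out.rfind("\n{"):])
    if info["qga"]["ready"] and info["mounts"].get("status") == "ok":
        print("VM looks healthy")
    else:
        print("check:", info["health"].get("status"), info["mounts"].get("status"))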
@@ -2118,6 +2529,9 @@ def main():
     test_parser.add_argument(
         "--verbose", "-v", action="store_true", help="Verbose output"
     )
+    test_parser.add_argument(
+        "--validate", action="store_true", help="Run full validation (mounts, packages, services)"
+    )
     test_parser.set_defaults(func=cmd_test)

     args = parser.parse_args()