clonebox 1.1.13__py3-none-any.whl → 1.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clonebox/audit.py +452 -0
- clonebox/cli.py +966 -10
- clonebox/cloner.py +221 -135
- clonebox/orchestrator.py +568 -0
- clonebox/plugins/__init__.py +24 -0
- clonebox/plugins/base.py +319 -0
- clonebox/plugins/manager.py +523 -0
- clonebox/remote.py +511 -0
- clonebox/secrets.py +9 -6
- clonebox/validator.py +113 -41
- {clonebox-1.1.13.dist-info → clonebox-1.1.15.dist-info}/METADATA +5 -1
- {clonebox-1.1.13.dist-info → clonebox-1.1.15.dist-info}/RECORD +16 -10
- {clonebox-1.1.13.dist-info → clonebox-1.1.15.dist-info}/WHEEL +0 -0
- {clonebox-1.1.13.dist-info → clonebox-1.1.15.dist-info}/entry_points.txt +0 -0
- {clonebox-1.1.13.dist-info → clonebox-1.1.15.dist-info}/licenses/LICENSE +0 -0
- {clonebox-1.1.13.dist-info → clonebox-1.1.15.dist-info}/top_level.txt +0 -0
clonebox/orchestrator.py
ADDED
|
@@ -0,0 +1,568 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Multi-VM orchestration for CloneBox.
|
|
3
|
+
Manages multiple VMs with dependencies, shared networks, and coordinated lifecycle.
|
|
4
|
+
"""
|
|
5
|
+
from collections import defaultdict
|
|
6
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed, Future
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from enum import Enum
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Optional, List, Dict, Any, Set, Callable
|
|
11
|
+
import threading
|
|
12
|
+
import time
|
|
13
|
+
import yaml
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class VMOrchestrationState(Enum):
    """State of a VM within orchestration.

    Tracks the orchestration-level lifecycle of one VM; this is distinct
    from the actual libvirt domain state (see Orchestrator.status(),
    which reports both side by side).
    """
    PENDING = "pending"        # not yet processed by the orchestrator
    CREATING = "creating"      # config is being loaded / VM being defined
    STARTING = "starting"      # VM creation/boot has been requested
    RUNNING = "running"        # started; health check (if any) not yet passed
    HEALTHY = "healthy"        # started and the configured health check passed
    UNHEALTHY = "unhealthy"    # started but the health check timed out/failed
    STOPPING = "stopping"      # stop has been requested
    STOPPED = "stopped"        # stop completed successfully
    FAILED = "failed"          # start raised an exception (see OrchestratedVM.error)
    UNKNOWN = "unknown"
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@dataclass
class OrchestratedVM:
    """A VM within an orchestration.

    Built by Orchestrator._create_plan() from one entry of the compose
    file's ``vms`` section. The first nine fields mirror the compose
    configuration; the remaining fields are mutable runtime state updated
    by the orchestrator (under its lock) during up()/down().
    """

    name: str                                        # VM name (compose key)
    config_path: Optional[Path] = None               # path from the ``config`` key, if present
    template: Optional[str] = None                   # ``template`` key (template-based creation not yet implemented)
    depends_on: List[str] = field(default_factory=list)   # names of VMs that must start first
    health_check: Optional[Dict[str, Any]] = None    # raw health-check config (type/timeout/port/url/exec/...)
    environment: Dict[str, str] = field(default_factory=dict)  # env vars merged into the VM config
    volumes: Dict[str, str] = field(default_factory=dict)      # volume mappings from the compose entry
    vm_overrides: Dict[str, Any] = field(default_factory=dict) # ``vm`` section overrides applied over the config file
    # --- runtime state below; mutated by the orchestrator ---
    state: VMOrchestrationState = VMOrchestrationState.PENDING
    error: Optional[str] = None                      # last error message, if start/stop failed
    ip_address: Optional[str] = None                 # NOTE(review): never assigned in this module — presumably filled elsewhere
    start_time: Optional[float] = None               # time.time() when start was initiated
    health_check_passed: bool = False                # True once the health check has succeeded
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class OrchestrationPlan:
    """Execution plan for orchestration.

    Produced by Orchestrator._create_plan(): the parsed VMs plus a
    topologically sorted start order (and its reverse for stopping),
    together with the pass-through compose sections.
    """

    vms: Dict[str, OrchestratedVM]     # all VMs, keyed by name
    start_order: List[List[str]]       # Groups of VMs that can start in parallel
    stop_order: List[List[str]]        # Reverse of start_order
    volumes: Dict[str, Dict[str, Any]]   # compose ``volumes`` section (passed through)
    networks: Dict[str, Dict[str, Any]]  # compose ``networks`` section (passed through)
    defaults: Dict[str, Any]             # compose ``defaults`` section (passed through)
|
57
|
+
|
|
58
|
+
|
|
59
|
+
@dataclass
class OrchestrationResult:
    """Result of an orchestration operation (up/down/restart).

    ``success`` is True iff ``errors`` is empty; ``states`` contains the
    final orchestration state of every VM in the plan, not only the ones
    the operation touched.
    """

    success: bool                               # True when no VM failed
    states: Dict[str, VMOrchestrationState]     # final state per VM name
    errors: Dict[str, str]                      # VM name -> error message for failures
    duration_seconds: float                     # wall-clock duration of the operation
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class Orchestrator:
    """
    Orchestrate multiple VMs with dependencies.

    Usage:
        orch = Orchestrator.from_file("clonebox-compose.yaml")
        result = orch.up()  # Start all VMs in dependency order
        orch.down()  # Stop all VMs
        status = orch.status()  # Get status of all VMs
    """

    def __init__(
        self,
        config: Dict[str, Any],
        cloner: Optional[Any] = None,
        user_session: bool = False,
        max_workers: int = 4,
    ):
        """Create an orchestrator from an already-parsed compose config.

        Args:
            config: Parsed compose configuration (as from yaml.safe_load).
            cloner: Optional pre-built VM cloner; lazily created otherwise.
            user_session: Use qemu:///session instead of qemu:///system.
            max_workers: Thread-pool size for parallel VM starts.

        Raises:
            ValueError: If the config references unknown VMs or contains
                a dependency cycle (via _create_plan).
        """
        self.config = config
        self.user_session = user_session
        self.max_workers = max_workers
        self._cloner = cloner
        # Executor only exists for the duration of an up() call.
        self._executor: Optional[ThreadPoolExecutor] = None
        # Guards mutation of per-VM runtime state from worker threads.
        self._lock = threading.Lock()

        # Parse and validate configuration
        self.plan = self._create_plan()

    @classmethod
    def from_file(
        cls,
        compose_file: Path,
        cloner: Optional[Any] = None,
        user_session: bool = False,
    ) -> "Orchestrator":
        """Create orchestrator from compose file."""
        compose_path = Path(compose_file)
        if not compose_path.exists():
            raise FileNotFoundError(f"Compose file not found: {compose_path}")

        with open(compose_path) as f:
            config = yaml.safe_load(f)

        return cls(config, cloner=cloner, user_session=user_session)

    @property
    def cloner(self) -> Any:
        """Get or create VM cloner (lazy; avoids import cost until needed)."""
        if self._cloner is None:
            from clonebox.cloner import SelectiveVMCloner
            self._cloner = SelectiveVMCloner(user_session=self.user_session)
        return self._cloner

    def _create_plan(self) -> OrchestrationPlan:
        """Create execution plan from configuration.

        Parses the ``vms`` section into OrchestratedVM records and computes
        the parallel start order via topological sort.

        Raises:
            ValueError: On unknown dependencies or dependency cycles.
        """
        vms: Dict[str, OrchestratedVM] = {}
        defaults = self.config.get("defaults", {})

        for name, vm_config in self.config.get("vms", {}).items():
            vms[name] = OrchestratedVM(
                name=name,
                config_path=Path(vm_config["config"]) if "config" in vm_config else None,
                template=vm_config.get("template"),
                depends_on=vm_config.get("depends_on", []),
                health_check=vm_config.get("health_check"),
                environment=vm_config.get("environment", {}),
                volumes=vm_config.get("volumes", {}),
                vm_overrides=vm_config.get("vm", {}),
            )

        # Calculate start order using topological sort
        start_order = self._topological_sort(vms)
        # Stop order is reverse
        stop_order = [list(reversed(group)) for group in reversed(start_order)]

        return OrchestrationPlan(
            vms=vms,
            start_order=start_order,
            stop_order=stop_order,
            volumes=self.config.get("volumes", {}),
            networks=self.config.get("networks", {}),
            defaults=defaults,
        )

    def _topological_sort(self, vms: Dict[str, OrchestratedVM]) -> List[List[str]]:
        """
        Topological sort with parallel group detection.
        Returns list of groups, where VMs in same group can start in parallel.

        Raises:
            ValueError: If a dependency names an unknown VM, or a cycle exists.
        """
        # Build dependency graph
        in_degree: Dict[str, int] = {name: 0 for name in vms}
        dependents: Dict[str, List[str]] = defaultdict(list)

        for name, vm in vms.items():
            for dep in vm.depends_on:
                if dep not in vms:
                    raise ValueError(f"VM '{name}' depends on unknown VM '{dep}'")
                in_degree[name] += 1
                dependents[dep].append(name)

        # Kahn's algorithm with level tracking
        levels: List[List[str]] = []
        current_level = [name for name, degree in in_degree.items() if degree == 0]

        while current_level:
            levels.append(sorted(current_level))  # Sort for deterministic order
            next_level: List[str] = []

            for name in current_level:
                for dependent in dependents[name]:
                    in_degree[dependent] -= 1
                    if in_degree[dependent] == 0:
                        next_level.append(dependent)

            current_level = next_level

        # Check for cycles: any VM not emitted still had unresolved deps.
        if sum(len(level) for level in levels) != len(vms):
            raise ValueError("Circular dependency detected in VM configuration")

        return levels

    def _get_vms_with_dependencies(self, services: List[str]) -> Set[str]:
        """Get VMs and all their dependencies (transitive closure).

        Raises:
            ValueError: If any requested or transitive name is unknown.
        """
        to_include: Set[str] = set()
        to_process = list(services)

        while to_process:
            vm_name = to_process.pop()
            if vm_name in to_include:
                continue
            if vm_name not in self.plan.vms:
                raise ValueError(f"Unknown VM: {vm_name}")

            to_include.add(vm_name)
            vm = self.plan.vms[vm_name]
            to_process.extend(vm.depends_on)

        return to_include

    def _start_vm(self, vm_name: str, console: Optional[Any] = None) -> bool:
        """Start a single VM.

        Runs in a worker thread during parallel up(); all mutations of the
        shared OrchestratedVM record happen under self._lock. Returns True
        on success; on failure records the error and returns False rather
        than raising.
        """
        vm = self.plan.vms[vm_name]

        with self._lock:
            vm.state = VMOrchestrationState.CREATING
            vm.start_time = time.time()

        try:
            # Load VM config
            # NOTE(review): a config_path that is set but missing on disk
            # falls through to the template/error branches below.
            if vm.config_path and vm.config_path.exists():
                from clonebox.cli import load_clonebox_config, create_vm_from_config
                config = load_clonebox_config(vm.config_path)

                # Apply overrides from compose file
                if vm.vm_overrides:
                    if "vm" in config:
                        config["vm"].update(vm.vm_overrides)
                    else:
                        config.update(vm.vm_overrides)

                # Apply environment variables
                if vm.environment:
                    config.setdefault("environment", {}).update(vm.environment)

                # Create VM
                with self._lock:
                    vm.state = VMOrchestrationState.STARTING

                create_vm_from_config(
                    config,
                    start=True,
                    user_session=self.user_session,
                    replace=False,
                )

            elif vm.template:
                # TODO: Support template-based VM creation
                raise NotImplementedError(f"Template-based VMs not yet implemented: {vm.template}")

            else:
                raise ValueError(f"VM '{vm_name}' has neither config nor template")

            with self._lock:
                vm.state = VMOrchestrationState.RUNNING

            # Run health check if configured
            if vm.health_check:
                if self._run_health_check(vm_name):
                    with self._lock:
                        vm.state = VMOrchestrationState.HEALTHY
                        vm.health_check_passed = True
                else:
                    with self._lock:
                        vm.state = VMOrchestrationState.UNHEALTHY

            # An UNHEALTHY VM still counts as a successful start here.
            return True

        except Exception as e:
            with self._lock:
                vm.state = VMOrchestrationState.FAILED
                vm.error = str(e)
            return False

    def _stop_vm(self, vm_name: str, force: bool = False, console: Optional[Any] = None) -> bool:
        """Stop a single VM.

        Returns True on success; on failure records the error message on
        the VM record and returns False (state stays STOPPING).
        """
        vm = self.plan.vms[vm_name]

        with self._lock:
            vm.state = VMOrchestrationState.STOPPING

        try:
            self.cloner.stop_vm(vm_name, force=force, console=console)

            with self._lock:
                vm.state = VMOrchestrationState.STOPPED

            return True

        except Exception as e:
            with self._lock:
                vm.error = str(e)
            return False

    def _run_health_check(self, vm_name: str, timeout: int = 60) -> bool:
        """Run health check for a VM.

        Polls every 2 seconds via the QEMU guest agent until the check
        passes or the timeout elapses. The ``timeout`` parameter is a
        fallback; a ``timeout`` key in the health-check config ("30s",
        "2m", or a number of seconds) overrides it when parseable.

        Returns True if the check passed (or no check is configured).
        """
        vm = self.plan.vms[vm_name]
        if not vm.health_check:
            return True

        check_type = vm.health_check.get("type", "tcp")
        check_timeout = vm.health_check.get("timeout", "30s")

        # Parse timeout (only "Ns"/"Nm" string forms are recognized).
        if isinstance(check_timeout, str):
            if check_timeout.endswith("s"):
                timeout = int(check_timeout[:-1])
            elif check_timeout.endswith("m"):
                timeout = int(check_timeout[:-1]) * 60

        conn_uri = "qemu:///session" if self.user_session else "qemu:///system"
        start = time.time()

        while time.time() - start < timeout:
            try:
                from clonebox.cli import _qga_exec

                if check_type == "tcp":
                    # Probe the port from inside the guest via bash /dev/tcp.
                    port = vm.health_check.get("port", 22)
                    result = _qga_exec(
                        vm_name, conn_uri,
                        f"timeout 5 bash -c 'echo > /dev/tcp/localhost/{port}' 2>/dev/null && echo OK || echo FAIL",
                        timeout=10
                    )
                    if result and "OK" in result:
                        return True

                elif check_type == "http":
                    # curl inside the guest; compare the HTTP status code.
                    url = vm.health_check.get("url", "http://localhost/health")
                    result = _qga_exec(
                        vm_name, conn_uri,
                        f"curl -s -o /dev/null -w '%{{http_code}}' '{url}' 2>/dev/null",
                        timeout=10
                    )
                    if result:
                        status_code = int(result.strip())
                        expected = vm.health_check.get("expected_status", [200])
                        if isinstance(expected, int):
                            expected = [expected]
                        if status_code in expected:
                            return True

                elif check_type == "command":
                    # Arbitrary command; optionally match expected output.
                    cmd = vm.health_check.get("exec", "true")
                    expected_output = vm.health_check.get("expected_output")
                    result = _qga_exec(vm_name, conn_uri, cmd, timeout=10)
                    if result is not None:
                        if expected_output:
                            if expected_output in result:
                                return True
                        else:
                            return True

            except Exception:
                # Guest agent may not be up yet; keep polling until timeout.
                pass

            time.sleep(2)

        return False

    def up(
        self,
        services: Optional[List[str]] = None,
        parallel: bool = True,
        console: Optional[Any] = None,
    ) -> OrchestrationResult:
        """
        Start VMs in dependency order.

        Args:
            services: Specific VMs to start (and their dependencies)
            parallel: If True, start independent VMs in parallel
            console: Rich console for output

        Returns:
            OrchestrationResult with final states
        """
        # NOTE(review): AuditOutcome is imported but not used below.
        from clonebox.audit import get_audit_logger, AuditEventType, AuditOutcome

        start_time = time.time()
        audit = get_audit_logger()

        # Determine which VMs to start
        if services:
            to_start = self._get_vms_with_dependencies(services)
        else:
            to_start = set(self.plan.vms.keys())

        errors: Dict[str, str] = {}

        with audit.operation(AuditEventType.VM_START, "orchestration", "compose") as ctx:
            ctx.add_detail("vms", list(to_start))
            ctx.add_detail("parallel", parallel)

            self._executor = ThreadPoolExecutor(max_workers=self.max_workers if parallel else 1)

            try:
                # Each level contains mutually independent VMs; levels run
                # strictly one after another so dependencies are satisfied.
                for level in self.plan.start_order:
                    # Filter to VMs we want to start
                    level_vms = [vm for vm in level if vm in to_start]
                    if not level_vms:
                        continue

                    if parallel:
                        # Start all VMs in this level in parallel
                        futures: Dict[Future, str] = {}
                        for vm_name in level_vms:
                            future = self._executor.submit(self._start_vm, vm_name, console)
                            futures[future] = vm_name

                        # Wait for all to complete
                        for future in as_completed(futures):
                            vm_name = futures[future]
                            try:
                                success = future.result()
                                if not success:
                                    vm = self.plan.vms[vm_name]
                                    errors[vm_name] = vm.error or "Unknown error"
                            except Exception as e:
                                errors[vm_name] = str(e)

                    else:
                        # Start sequentially
                        for vm_name in level_vms:
                            success = self._start_vm(vm_name, console)
                            if not success:
                                vm = self.plan.vms[vm_name]
                                errors[vm_name] = vm.error or "Unknown error"

            finally:
                self._executor.shutdown(wait=True)
                self._executor = None

        duration = time.time() - start_time
        states = {name: vm.state for name, vm in self.plan.vms.items()}

        return OrchestrationResult(
            success=len(errors) == 0,
            states=states,
            errors=errors,
            duration_seconds=duration,
        )

    def down(
        self,
        services: Optional[List[str]] = None,
        force: bool = False,
        console: Optional[Any] = None,
    ) -> OrchestrationResult:
        """
        Stop VMs in reverse dependency order.

        Args:
            services: Specific VMs to stop
            force: Force stop if graceful fails
            console: Rich console for output

        Returns:
            OrchestrationResult with final states
        """
        from clonebox.audit import get_audit_logger, AuditEventType

        start_time = time.time()
        audit = get_audit_logger()

        # Determine which VMs to stop
        # NOTE(review): unlike up(), dependents of the requested services
        # are NOT automatically included here.
        if services:
            to_stop = set(services)
        else:
            to_stop = set(self.plan.vms.keys())

        errors: Dict[str, str] = {}

        with audit.operation(AuditEventType.VM_STOP, "orchestration", "compose") as ctx:
            ctx.add_detail("vms", list(to_stop))
            ctx.add_detail("force", force)

            # Stops are always sequential, in reverse dependency order.
            for level in self.plan.stop_order:
                level_vms = [vm for vm in level if vm in to_stop]
                for vm_name in level_vms:
                    success = self._stop_vm(vm_name, force=force, console=console)
                    if not success:
                        vm = self.plan.vms[vm_name]
                        errors[vm_name] = vm.error or "Unknown error"

        duration = time.time() - start_time
        states = {name: vm.state for name, vm in self.plan.vms.items()}

        return OrchestrationResult(
            success=len(errors) == 0,
            states=states,
            errors=errors,
            duration_seconds=duration,
        )

    def restart(
        self,
        services: Optional[List[str]] = None,
        console: Optional[Any] = None,
    ) -> OrchestrationResult:
        """Restart VMs (down then up). Aborts if the down phase fails."""
        down_result = self.down(services=services, console=console)
        if not down_result.success:
            return down_result
        return self.up(services=services, console=console)

    def status(self) -> Dict[str, Dict[str, Any]]:
        """Get status of all VMs in the orchestration.

        Returns a dict keyed by VM name, combining the orchestration-level
        state with the actual state reported by the cloner ("not_found"
        when the VM does not exist, "unknown" on lookup errors).
        """
        result: Dict[str, Dict[str, Any]] = {}

        for name, vm in self.plan.vms.items():
            # Try to get actual VM state from libvirt
            try:
                vm_info = self.cloner.get_vm_info(name)
                actual_state = vm_info.get("state", "unknown") if vm_info else "not_found"
            except Exception:
                actual_state = "unknown"

            result[name] = {
                "name": name,
                "orchestration_state": vm.state.value,
                "actual_state": actual_state,
                "depends_on": vm.depends_on,
                "health_check_passed": vm.health_check_passed,
                "error": vm.error,
                "ip_address": vm.ip_address,
            }

        return result

    def logs(self, vm_name: str, follow: bool = False, lines: int = 100) -> Optional[str]:
        """Get logs from a VM (journalctl via the guest agent).

        Returns None on any guest-agent failure. NOTE(review): with
        follow=True the underlying exec still has a 30s timeout, so
        "follow" is effectively bounded.

        Raises:
            ValueError: If vm_name is not part of this orchestration.
        """
        if vm_name not in self.plan.vms:
            raise ValueError(f"Unknown VM: {vm_name}")

        conn_uri = "qemu:///session" if self.user_session else "qemu:///system"

        try:
            from clonebox.cli import _qga_exec
            cmd = f"journalctl -n {lines}" if not follow else "journalctl -f"
            return _qga_exec(vm_name, conn_uri, cmd, timeout=30)
        except Exception:
            return None

    def exec(self, vm_name: str, command: str, timeout: int = 30) -> Optional[str]:
        """Execute command in a VM via the guest agent.

        Returns the command output, or None on any failure.

        Raises:
            ValueError: If vm_name is not part of this orchestration.
        """
        if vm_name not in self.plan.vms:
            raise ValueError(f"Unknown VM: {vm_name}")

        conn_uri = "qemu:///session" if self.user_session else "qemu:///system"

        try:
            from clonebox.cli import _qga_exec
            return _qga_exec(vm_name, conn_uri, command, timeout=timeout)
        except Exception:
            return None
|
554
|
+
|
|
555
|
+
|
|
556
|
+
def load_compose_file(path: Path) -> Dict[str, Any]:
    """Load and validate a compose file.

    Args:
        path: Path to the YAML compose file.

    Returns:
        The parsed compose configuration as a dictionary.

    Raises:
        ValueError: If the file is empty or not a YAML mapping, declares
            an unsupported version, or lacks a ``vms`` section.
        OSError: If the file cannot be opened.
    """
    with open(path) as f:
        config = yaml.safe_load(f)

    # yaml.safe_load returns None for an empty document, and can return a
    # scalar or list for malformed input. Fail with a clear message instead
    # of an AttributeError on config.get() below.
    if not isinstance(config, dict):
        raise ValueError(f"Compose file is empty or not a mapping: {path}")

    version = config.get("version", "1")
    if version not in ("1", 1):
        raise ValueError(f"Unsupported compose version: {version}")

    if "vms" not in config:
        raise ValueError("Compose file must define 'vms' section")

    return config
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
"""
|
|
2
|
+
CloneBox Plugin System.
|
|
3
|
+
|
|
4
|
+
Provides extensibility through hooks and custom plugins.
|
|
5
|
+
"""
|
|
6
|
+
from clonebox.plugins.base import (
|
|
7
|
+
Plugin,
|
|
8
|
+
PluginHook,
|
|
9
|
+
PluginContext,
|
|
10
|
+
PluginMetadata,
|
|
11
|
+
)
|
|
12
|
+
from clonebox.plugins.manager import (
|
|
13
|
+
PluginManager,
|
|
14
|
+
get_plugin_manager,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
__all__ = [
|
|
18
|
+
"Plugin",
|
|
19
|
+
"PluginHook",
|
|
20
|
+
"PluginContext",
|
|
21
|
+
"PluginMetadata",
|
|
22
|
+
"PluginManager",
|
|
23
|
+
"get_plugin_manager",
|
|
24
|
+
]
|