clonebox 1.1.3-py3-none-any.whl → 1.1.4-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- clonebox/cli.py +142 -0
- clonebox/health/__init__.py +16 -0
- clonebox/health/models.py +194 -0
- clonebox/monitor.py +269 -0
- clonebox/snapshots/__init__.py +12 -0
- clonebox/snapshots/manager.py +355 -0
- clonebox/snapshots/models.py +187 -0
- {clonebox-1.1.3.dist-info → clonebox-1.1.4.dist-info}/METADATA +1 -1
- {clonebox-1.1.3.dist-info → clonebox-1.1.4.dist-info}/RECORD +13 -7
- {clonebox-1.1.3.dist-info → clonebox-1.1.4.dist-info}/WHEEL +0 -0
- {clonebox-1.1.3.dist-info → clonebox-1.1.4.dist-info}/entry_points.txt +0 -0
- {clonebox-1.1.3.dist-info → clonebox-1.1.4.dist-info}/licenses/LICENSE +0 -0
- {clonebox-1.1.3.dist-info → clonebox-1.1.4.dist-info}/top_level.txt +0 -0
clonebox/cli.py  CHANGED
@@ -30,6 +30,7 @@ from clonebox.models import ContainerConfig
 from clonebox.profiles import merge_with_profile
 from clonebox.exporter import SecureExporter, VMExporter
 from clonebox.importer import SecureImporter, VMImporter
+from clonebox.monitor import ResourceMonitor, format_bytes
 from clonebox.p2p import P2PManager

 # Custom questionary style
@@ -2638,6 +2639,118 @@ def cmd_detect(args):
     console.print(table)


+def cmd_monitor(args) -> None:
+    """Real-time resource monitoring for VMs and containers."""
+    conn_uri = "qemu:///session" if getattr(args, "user", False) else "qemu:///system"
+    refresh = getattr(args, "refresh", 2.0)
+    once = getattr(args, "once", False)
+
+    monitor = ResourceMonitor(conn_uri)
+
+    try:
+        while True:
+            # Clear screen for live update
+            if not once:
+                console.clear()
+
+            console.print("[bold cyan]📊 CloneBox Resource Monitor[/]")
+            console.print()
+
+            # VM Stats
+            vm_stats = monitor.get_all_vm_stats()
+            if vm_stats:
+                table = Table(title="🖥️ Virtual Machines", border_style="cyan")
+                table.add_column("Name", style="bold")
+                table.add_column("State")
+                table.add_column("CPU %")
+                table.add_column("Memory")
+                table.add_column("Disk")
+                table.add_column("Network I/O")
+
+                for vm in vm_stats:
+                    state_color = "green" if vm.state == "running" else "yellow"
+                    cpu_color = "red" if vm.cpu_percent > 80 else "green"
+                    mem_pct = (vm.memory_used_mb / vm.memory_total_mb * 100) if vm.memory_total_mb > 0 else 0
+                    mem_color = "red" if mem_pct > 80 else "green"
+
+                    table.add_row(
+                        vm.name,
+                        f"[{state_color}]{vm.state}[/]",
+                        f"[{cpu_color}]{vm.cpu_percent:.1f}%[/]",
+                        f"[{mem_color}]{vm.memory_used_mb}/{vm.memory_total_mb} MB[/]",
+                        f"{vm.disk_used_gb:.1f}/{vm.disk_total_gb:.1f} GB",
+                        f"↓{format_bytes(vm.network_rx_bytes)} ↑{format_bytes(vm.network_tx_bytes)}",
+                    )
+                console.print(table)
+            else:
+                console.print("[dim]No VMs found.[/]")
+
+            console.print()
+
+            # Container Stats
+            container_stats = monitor.get_container_stats()
+            if container_stats:
+                table = Table(title="🐳 Containers", border_style="blue")
+                table.add_column("Name", style="bold")
+                table.add_column("State")
+                table.add_column("CPU %")
+                table.add_column("Memory")
+                table.add_column("Network I/O")
+                table.add_column("PIDs")
+
+                for c in container_stats:
+                    cpu_color = "red" if c.cpu_percent > 80 else "green"
+                    mem_pct = (c.memory_used_mb / c.memory_limit_mb * 100) if c.memory_limit_mb > 0 else 0
+                    mem_color = "red" if mem_pct > 80 else "green"
+
+                    table.add_row(
+                        c.name,
+                        f"[green]{c.state}[/]",
+                        f"[{cpu_color}]{c.cpu_percent:.1f}%[/]",
+                        f"[{mem_color}]{c.memory_used_mb}/{c.memory_limit_mb} MB[/]",
+                        f"↓{format_bytes(c.network_rx_bytes)} ↑{format_bytes(c.network_tx_bytes)}",
+                        str(c.pids),
+                    )
+                console.print(table)
+            else:
+                console.print("[dim]No containers running.[/]")
+
+            if once:
+                break
+
+            console.print(f"\n[dim]Refreshing every {refresh}s. Press Ctrl+C to exit.[/]")
+            time.sleep(refresh)
+
+    except KeyboardInterrupt:
+        console.print("\n[yellow]Monitoring stopped.[/]")
+    finally:
+        monitor.close()
+
+
+def cmd_exec(args) -> None:
+    """Execute command in VM via QEMU Guest Agent."""
+    vm_name, config_file = _resolve_vm_name_and_config_file(args.name)
+    conn_uri = "qemu:///session" if getattr(args, "user", False) else "qemu:///system"
+    command = args.command
+    timeout = getattr(args, "timeout", 30)
+
+    if not _qga_ping(vm_name, conn_uri):
+        console.print(f"[red]❌ Cannot connect to VM '{vm_name}' via QEMU Guest Agent[/]")
+        console.print("[dim]Make sure the VM is running and qemu-guest-agent is installed.[/]")
+        return
+
+    console.print(f"[cyan]▶ Executing in {vm_name}:[/] {command}")
+
+    result = _qga_exec(vm_name, conn_uri, command, timeout=timeout)
+
+    if result is None:
+        console.print("[red]❌ Command execution failed or timed out[/]")
+    elif result == "":
+        console.print("[dim](no output)[/]")
+    else:
+        console.print(result)
+
+
 def cmd_keygen(args) -> None:
     """Generate encryption key for secure P2P transfers."""
     key_path = SecureExporter.generate_key()
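Because cmd_monitor reads all of its options with getattr(..., default), any attribute-style object works in place of an argparse Namespace. A minimal sketch of driving the new command outside the CLI, assuming clonebox 1.1.4 and its libvirt dependency are installed (the option values here are illustrative):

    from types import SimpleNamespace

    from clonebox.cli import cmd_monitor

    # user=True selects qemu:///session; once=True prints the tables a single time.
    args = SimpleNamespace(user=True, refresh=1.0, once=True)
    cmd_monitor(args)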
@@ -3198,6 +3311,35 @@ def main():
     )
     test_parser.set_defaults(func=cmd_test)

+    # Monitor command - real-time resource monitoring
+    monitor_parser = subparsers.add_parser("monitor", help="Real-time resource monitoring")
+    monitor_parser.add_argument(
+        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
+    )
+    monitor_parser.add_argument(
+        "--refresh", "-r", type=float, default=2.0, help="Refresh interval in seconds (default: 2)"
+    )
+    monitor_parser.add_argument(
+        "--once", action="store_true", help="Show stats once and exit"
+    )
+    monitor_parser.set_defaults(func=cmd_monitor)
+
+    # Exec command - execute command in VM
+    exec_parser = subparsers.add_parser("exec", help="Execute command in VM via QEMU Guest Agent")
+    exec_parser.add_argument(
+        "name", nargs="?", default=None, help="VM name or '.' to use .clonebox.yaml"
+    )
+    exec_parser.add_argument(
+        "command", help="Command to execute in VM"
+    )
+    exec_parser.add_argument(
+        "-u", "--user", action="store_true", help="Use user session (qemu:///session)"
+    )
+    exec_parser.add_argument(
+        "--timeout", "-t", type=int, default=30, help="Command timeout in seconds (default: 30)"
+    )
+    exec_parser.set_defaults(func=cmd_exec)
+
     # === P2P Secure Transfer Commands ===

     # Keygen command - generate encryption key
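The new sub-commands use the usual argparse dispatch pattern: each subparser stores its handler with set_defaults(func=...), and main() calls args.func(args). A self-contained sketch of that pattern (names here are illustrative, not clonebox's own):

    import argparse

    def cmd_demo(args) -> None:
        print(f"refresh={args.refresh}, once={args.once}")

    parser = argparse.ArgumentParser(prog="demo")
    subparsers = parser.add_subparsers(dest="command", required=True)

    monitor = subparsers.add_parser("monitor", help="Real-time resource monitoring")
    monitor.add_argument("--refresh", "-r", type=float, default=2.0)
    monitor.add_argument("--once", action="store_true")
    monitor.set_defaults(func=cmd_demo)   # handler attached to the subparser

    args = parser.parse_args(["monitor", "--once", "-r", "0.5"])
    args.func(args)   # -> refresh=0.5, once=True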
clonebox/health/__init__.py  ADDED
@@ -0,0 +1,16 @@
"""Health check system for CloneBox VMs."""

from .models import HealthCheckResult, HealthStatus, ProbeConfig
from .probes import HTTPProbe, TCPProbe, CommandProbe, ScriptProbe
from .manager import HealthCheckManager

__all__ = [
    "HealthCheckResult",
    "HealthStatus",
    "ProbeConfig",
    "HTTPProbe",
    "TCPProbe",
    "CommandProbe",
    "ScriptProbe",
    "HealthCheckManager",
]
clonebox/health/models.py  ADDED
@@ -0,0 +1,194 @@
#!/usr/bin/env python3
"""Data models for health check system."""

from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Optional


class HealthStatus(Enum):
    """Health check status."""

    HEALTHY = "healthy"
    UNHEALTHY = "unhealthy"
    DEGRADED = "degraded"
    UNKNOWN = "unknown"
    TIMEOUT = "timeout"


class ProbeType(Enum):
    """Type of health probe."""

    HTTP = "http"
    TCP = "tcp"
    COMMAND = "command"
    SCRIPT = "script"


@dataclass
class ProbeConfig:
    """Configuration for a health probe."""

    name: str
    probe_type: ProbeType
    enabled: bool = True

    # Timing
    timeout_seconds: float = 5.0
    interval_seconds: float = 30.0
    retries: int = 3
    retry_delay_seconds: float = 1.0

    # HTTP probe
    url: Optional[str] = None
    method: str = "GET"
    expected_status: int = 200
    expected_body: Optional[str] = None
    expected_json: Optional[Dict[str, Any]] = None
    headers: Dict[str, str] = field(default_factory=dict)

    # TCP probe
    host: str = "localhost"
    port: Optional[int] = None

    # Command probe
    command: Optional[str] = None
    expected_output: Optional[str] = None
    expected_exit_code: int = 0

    # Script probe
    script_path: Optional[str] = None

    # Thresholds
    failure_threshold: int = 3  # Consecutive failures before unhealthy
    success_threshold: int = 1  # Consecutive successes before healthy

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return {
            "name": self.name,
            "type": self.probe_type.value,
            "enabled": self.enabled,
            "timeout": self.timeout_seconds,
            "interval": self.interval_seconds,
            "retries": self.retries,
            "url": self.url,
            "method": self.method,
            "expected_status": self.expected_status,
            "expected_body": self.expected_body,
            "expected_json": self.expected_json,
            "headers": self.headers,
            "host": self.host,
            "port": self.port,
            "command": self.command,
            "expected_output": self.expected_output,
            "expected_exit_code": self.expected_exit_code,
            "script_path": self.script_path,
            "failure_threshold": self.failure_threshold,
            "success_threshold": self.success_threshold,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ProbeConfig":
        """Create from dictionary."""
        return cls(
            name=data["name"],
            probe_type=ProbeType(data.get("type", "command")),
            enabled=data.get("enabled", True),
            timeout_seconds=data.get("timeout", 5.0),
            interval_seconds=data.get("interval", 30.0),
            retries=data.get("retries", 3),
            url=data.get("url"),
            method=data.get("method", "GET"),
            expected_status=data.get("expected_status", 200),
            expected_body=data.get("expected_body"),
            expected_json=data.get("expected_json"),
            headers=data.get("headers", {}),
            host=data.get("host", "localhost"),
            port=data.get("port"),
            command=data.get("command") or data.get("exec"),
            expected_output=data.get("expected_output"),
            expected_exit_code=data.get("expected_exit_code", data.get("exit_code", 0)),
            script_path=data.get("script_path") or data.get("path"),
            failure_threshold=data.get("failure_threshold", 3),
            success_threshold=data.get("success_threshold", 1),
        )


@dataclass
class HealthCheckResult:
    """Result of a health check."""

    probe_name: str
    status: HealthStatus
    checked_at: datetime
    duration_ms: float

    message: Optional[str] = None
    error: Optional[str] = None
    details: Dict[str, Any] = field(default_factory=dict)

    # Response info (for HTTP)
    response_code: Optional[int] = None
    response_body: Optional[str] = None

    # Command info
    exit_code: Optional[int] = None
    stdout: Optional[str] = None
    stderr: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return {
            "probe_name": self.probe_name,
            "status": self.status.value,
            "checked_at": self.checked_at.isoformat(),
            "duration_ms": self.duration_ms,
            "message": self.message,
            "error": self.error,
            "details": self.details,
            "response_code": self.response_code,
            "exit_code": self.exit_code,
        }

    @property
    def is_healthy(self) -> bool:
        """Check if result indicates healthy status."""
        return self.status == HealthStatus.HEALTHY


@dataclass
class VMHealthState:
    """Aggregated health state for a VM."""

    vm_name: str
    overall_status: HealthStatus
    last_check: datetime
    check_results: List[HealthCheckResult] = field(default_factory=list)

    # Counters
    consecutive_failures: int = 0
    consecutive_successes: int = 0
    total_checks: int = 0
    total_failures: int = 0

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return {
            "vm_name": self.vm_name,
            "overall_status": self.overall_status.value,
            "last_check": self.last_check.isoformat(),
            "check_results": [r.to_dict() for r in self.check_results],
            "consecutive_failures": self.consecutive_failures,
            "consecutive_successes": self.consecutive_successes,
            "total_checks": self.total_checks,
            "total_failures": self.total_failures,
        }

    @property
    def failure_rate(self) -> float:
        """Calculate failure rate percentage."""
        if self.total_checks == 0:
            return 0.0
        return (self.total_failures / self.total_checks) * 100
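ProbeConfig maps short dict keys ("timeout", "interval", "type") onto longer attribute names, so config-style dicts round-trip through from_dict()/to_dict(). A hedged usage sketch; note that importing anything from clonebox.health runs its __init__, which also imports the probes and manager modules that are not part of this diff, so the sketch assumes those are importable:

    from clonebox.health.models import ProbeConfig, ProbeType

    cfg = ProbeConfig.from_dict({
        "name": "web",
        "type": "http",                        # mapped to ProbeType.HTTP
        "url": "http://localhost:8080/health", # illustrative endpoint
        "expected_status": 200,
        "timeout": 2.0,                        # stored as timeout_seconds
    })
    assert cfg.probe_type is ProbeType.HTTP
    print(cfg.to_dict()["interval"])           # 30.0, the default interval_seconds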
clonebox/monitor.py  ADDED
@@ -0,0 +1,269 @@
#!/usr/bin/env python3
"""
Real-time resource monitoring for CloneBox VMs and containers.
"""

import json
import subprocess
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional

try:
    import libvirt
except ImportError:
    libvirt = None


@dataclass
class VMStats:
    """VM resource statistics."""

    name: str
    state: str
    cpu_percent: float
    memory_used_mb: int
    memory_total_mb: int
    disk_used_gb: float
    disk_total_gb: float
    network_rx_bytes: int
    network_tx_bytes: int
    uptime_seconds: int


@dataclass
class ContainerStats:
    """Container resource statistics."""

    name: str
    state: str
    cpu_percent: float
    memory_used_mb: int
    memory_limit_mb: int
    network_rx_bytes: int
    network_tx_bytes: int
    pids: int


class ResourceMonitor:
    """Monitor VM and container resources in real-time."""

    def __init__(self, conn_uri: str = "qemu:///session"):
        self.conn_uri = conn_uri
        self._conn = None
        self._prev_cpu: Dict[str, tuple] = {}

    @property
    def conn(self):
        if self._conn is None:
            if libvirt is None:
                raise RuntimeError("libvirt-python not installed")
            self._conn = libvirt.open(self.conn_uri)
        return self._conn

    def get_vm_stats(self, vm_name: str) -> Optional[VMStats]:
        """Get resource statistics for a VM."""
        try:
            dom = self.conn.lookupByName(vm_name)
            info = dom.info()

            state_map = {
                libvirt.VIR_DOMAIN_RUNNING: "running",
                libvirt.VIR_DOMAIN_PAUSED: "paused",
                libvirt.VIR_DOMAIN_SHUTDOWN: "shutdown",
                libvirt.VIR_DOMAIN_SHUTOFF: "shutoff",
                libvirt.VIR_DOMAIN_CRASHED: "crashed",
            }
            state = state_map.get(info[0], "unknown")

            # Memory
            memory_total_mb = info[1] // 1024
            memory_used_mb = info[2] // 1024 if info[2] > 0 else memory_total_mb

            # CPU percentage (requires two samples)
            cpu_time = info[4]
            now = time.time()
            cpu_percent = 0.0

            if vm_name in self._prev_cpu:
                prev_time, prev_cpu = self._prev_cpu[vm_name]
                time_delta = now - prev_time
                if time_delta > 0:
                    cpu_delta = cpu_time - prev_cpu
                    # CPU time is in nanoseconds
                    cpu_percent = (cpu_delta / (time_delta * 1e9)) * 100
                    cpu_percent = min(cpu_percent, 100.0 * info[3])  # Cap at vcpus * 100%

            self._prev_cpu[vm_name] = (now, cpu_time)

            # Disk stats (from block devices)
            disk_used_gb = 0.0
            disk_total_gb = 0.0
            try:
                xml = dom.XMLDesc()
                import xml.etree.ElementTree as ET

                root = ET.fromstring(xml)
                for disk in root.findall(".//disk[@type='file']"):
                    source = disk.find(".//source")
                    if source is not None and source.get("file"):
                        disk_path = Path(source.get("file"))
                        if disk_path.exists():
                            size_bytes = disk_path.stat().st_size
                            disk_total_gb += size_bytes / (1024**3)
                            # Actual usage requires qemu-img info
                            disk_used_gb += size_bytes / (1024**3)
            except Exception:
                pass

            # Network stats
            network_rx = 0
            network_tx = 0
            try:
                for iface in dom.interfaceAddresses(
                    libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT
                ).keys():
                    stats = dom.interfaceStats(iface)
                    network_rx += stats[0]
                    network_tx += stats[4]
            except Exception:
                pass

            return VMStats(
                name=vm_name,
                state=state,
                cpu_percent=cpu_percent,
                memory_used_mb=memory_used_mb,
                memory_total_mb=memory_total_mb,
                disk_used_gb=disk_used_gb,
                disk_total_gb=disk_total_gb,
                network_rx_bytes=network_rx,
                network_tx_bytes=network_tx,
                uptime_seconds=0,  # Would need guest agent for accurate uptime
            )

        except Exception:
            return None

    def get_all_vm_stats(self) -> List[VMStats]:
        """Get stats for all VMs."""
        stats = []
        try:
            for dom in self.conn.listAllDomains():
                vm_stats = self.get_vm_stats(dom.name())
                if vm_stats:
                    stats.append(vm_stats)
        except Exception:
            pass
        return stats

    def get_container_stats(self, engine: str = "auto") -> List[ContainerStats]:
        """Get resource statistics for containers."""
        if engine == "auto":
            engine = "podman" if self._check_engine("podman") else "docker"

        try:
            result = subprocess.run(
                [engine, "stats", "--no-stream", "--format", "json"],
                capture_output=True,
                text=True,
                timeout=10,
            )
            if result.returncode != 0:
                return []

            containers = json.loads(result.stdout) if result.stdout.strip() else []
            stats = []

            for c in containers:
                # Parse CPU percentage
                cpu_str = c.get("CPUPerc", "0%").replace("%", "")
                try:
                    cpu_percent = float(cpu_str)
                except ValueError:
                    cpu_percent = 0.0

                # Parse memory
                mem_usage = c.get("MemUsage", "0MiB / 0MiB")
                mem_parts = mem_usage.split("/")
                mem_used = self._parse_memory(mem_parts[0].strip()) if len(mem_parts) > 0 else 0
                mem_limit = self._parse_memory(mem_parts[1].strip()) if len(mem_parts) > 1 else 0

                # Parse network
                net_io = c.get("NetIO", "0B / 0B")
                net_parts = net_io.split("/")
                net_rx = self._parse_bytes(net_parts[0].strip()) if len(net_parts) > 0 else 0
                net_tx = self._parse_bytes(net_parts[1].strip()) if len(net_parts) > 1 else 0

                stats.append(
                    ContainerStats(
                        name=c.get("Name", c.get("Names", "unknown")),
                        state="running",
                        cpu_percent=cpu_percent,
                        memory_used_mb=mem_used,
                        memory_limit_mb=mem_limit,
                        network_rx_bytes=net_rx,
                        network_tx_bytes=net_tx,
                        pids=int(c.get("PIDs", 0)),
                    )
                )

            return stats

        except Exception:
            return []

    def _check_engine(self, engine: str) -> bool:
        """Check if container engine is available."""
        try:
            result = subprocess.run(
                [engine, "--version"], capture_output=True, timeout=5
            )
            return result.returncode == 0
        except Exception:
            return False

    def _parse_memory(self, mem_str: str) -> int:
        """Parse memory string like '100MiB' to MB."""
        mem_str = mem_str.upper()
        try:
            if "GIB" in mem_str or "GB" in mem_str:
                return int(float(mem_str.replace("GIB", "").replace("GB", "").strip()) * 1024)
            elif "MIB" in mem_str or "MB" in mem_str:
                return int(float(mem_str.replace("MIB", "").replace("MB", "").strip()))
            elif "KIB" in mem_str or "KB" in mem_str:
                return int(float(mem_str.replace("KIB", "").replace("KB", "").strip()) / 1024)
            else:
                return int(float(mem_str.replace("B", "").strip()) / (1024 * 1024))
        except ValueError:
            return 0

    def _parse_bytes(self, bytes_str: str) -> int:
        """Parse byte string like '1.5GB' to bytes."""
        bytes_str = bytes_str.upper()
        try:
            if "GB" in bytes_str:
                return int(float(bytes_str.replace("GB", "").strip()) * 1024**3)
            elif "MB" in bytes_str:
                return int(float(bytes_str.replace("MB", "").strip()) * 1024**2)
            elif "KB" in bytes_str:
                return int(float(bytes_str.replace("KB", "").strip()) * 1024)
            else:
                return int(float(bytes_str.replace("B", "").strip()))
        except ValueError:
            return 0

    def close(self) -> None:
        if self._conn is not None:
            self._conn.close()
            self._conn = None


def format_bytes(num_bytes: int) -> str:
    """Format bytes to human-readable string."""
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if abs(num_bytes) < 1024.0:
            return f"{num_bytes:.1f}{unit}"
        num_bytes /= 1024.0
    return f"{num_bytes:.1f}PB"
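ResourceMonitor computes cpu_percent from the delta between two samples of the domain's cumulative CPU time, so the first call per VM always reports 0.0. A minimal sketch of using the module directly, assuming libvirt-python is installed and a session libvirt daemon is reachable:

    import time

    from clonebox.monitor import ResourceMonitor, format_bytes

    monitor = ResourceMonitor("qemu:///session")
    try:
        monitor.get_all_vm_stats()        # first sample primes the per-VM CPU counters
        time.sleep(1)                     # wait so the second sample has a time delta
        for vm in monitor.get_all_vm_stats():
            print(vm.name, vm.state, f"{vm.cpu_percent:.1f}%",
                  format_bytes(vm.network_rx_bytes))
    finally:
        monitor.close()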
clonebox/snapshots/__init__.py  ADDED
@@ -0,0 +1,12 @@
"""Snapshot management for CloneBox VMs."""

from .models import Snapshot, SnapshotType, SnapshotState, SnapshotPolicy
from .manager import SnapshotManager

__all__ = [
    "Snapshot",
    "SnapshotType",
    "SnapshotState",
    "SnapshotPolicy",
    "SnapshotManager",
]
clonebox/snapshots/manager.py  ADDED
@@ -0,0 +1,355 @@
#!/usr/bin/env python3
"""Snapshot manager for CloneBox VMs."""

import json
import subprocess
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional

from .models import Snapshot, SnapshotPolicy, SnapshotState, SnapshotType

try:
    import libvirt
except ImportError:
    libvirt = None


class SnapshotManager:
    """Manage VM snapshots via libvirt."""

    def __init__(self, conn_uri: str = "qemu:///session"):
        self.conn_uri = conn_uri
        self._conn = None
        self._snapshots_dir = Path.home() / ".local/share/clonebox/snapshots"
        self._snapshots_dir.mkdir(parents=True, exist_ok=True)

    @property
    def conn(self):
        if self._conn is None:
            if libvirt is None:
                raise RuntimeError("libvirt-python not installed")
            self._conn = libvirt.open(self.conn_uri)
        return self._conn

    def create(
        self,
        vm_name: str,
        name: str,
        description: Optional[str] = None,
        snapshot_type: SnapshotType = SnapshotType.DISK_ONLY,
        tags: Optional[List[str]] = None,
        auto_policy: Optional[str] = None,
        expires_in_days: Optional[int] = None,
    ) -> Snapshot:
        """Create a new snapshot.

        Args:
            vm_name: Name of VM to snapshot
            name: Snapshot name
            description: Optional description
            snapshot_type: Type of snapshot (disk, full, external)
            tags: Optional tags for categorization
            auto_policy: If auto-created, the policy name
            expires_in_days: Auto-expire after N days
        """
        domain = self.conn.lookupByName(vm_name)

        # Generate snapshot XML
        snapshot_xml = self._generate_snapshot_xml(
            name=name,
            description=description,
            snapshot_type=snapshot_type,
        )

        # Create snapshot
        flags = 0
        if snapshot_type == SnapshotType.DISK_ONLY:
            flags = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
        elif snapshot_type == SnapshotType.FULL:
            flags = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC

        try:
            snap = domain.snapshotCreateXML(snapshot_xml, flags)
        except libvirt.libvirtError as e:
            raise RuntimeError(f"Failed to create snapshot: {e}")

        # Build snapshot object
        snapshot = Snapshot(
            name=name,
            vm_name=vm_name,
            snapshot_type=snapshot_type,
            state=SnapshotState.READY,
            created_at=datetime.now(),
            description=description,
            tags=tags or [],
            auto_created=auto_policy is not None,
            auto_policy=auto_policy,
            expires_at=(
                datetime.now() + timedelta(days=expires_in_days)
                if expires_in_days
                else None
            ),
        )

        # Save metadata
        self._save_snapshot_metadata(snapshot)

        return snapshot

    def restore(
        self,
        vm_name: str,
        name: str,
        force: bool = False,
    ) -> bool:
        """Restore VM to a snapshot.

        Args:
            vm_name: Name of VM
            name: Snapshot name to restore
            force: Force restore even if VM is running
        """
        domain = self.conn.lookupByName(vm_name)

        # Check if VM is running
        if domain.isActive() and not force:
            raise RuntimeError(
                f"VM '{vm_name}' is running. Stop it first or use --force"
            )

        try:
            snap = domain.snapshotLookupByName(name)
        except libvirt.libvirtError:
            raise RuntimeError(f"Snapshot '{name}' not found for VM '{vm_name}'")

        # Revert to snapshot
        flags = libvirt.VIR_DOMAIN_SNAPSHOT_REVERT_FORCE if force else 0
        try:
            domain.revertToSnapshot(snap, flags)
        except libvirt.libvirtError as e:
            raise RuntimeError(f"Failed to restore snapshot: {e}")

        return True

    def delete(
        self,
        vm_name: str,
        name: str,
        delete_children: bool = False,
    ) -> bool:
        """Delete a snapshot.

        Args:
            vm_name: Name of VM
            name: Snapshot name to delete
            delete_children: Also delete child snapshots
        """
        domain = self.conn.lookupByName(vm_name)

        try:
            snap = domain.snapshotLookupByName(name)
        except libvirt.libvirtError:
            raise RuntimeError(f"Snapshot '{name}' not found for VM '{vm_name}'")

        flags = 0
        if delete_children:
            flags = libvirt.VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN

        try:
            snap.delete(flags)
        except libvirt.libvirtError as e:
            raise RuntimeError(f"Failed to delete snapshot: {e}")

        # Remove metadata
        self._delete_snapshot_metadata(vm_name, name)

        return True

    def list(self, vm_name: str) -> List[Snapshot]:
        """List all snapshots for a VM."""
        domain = self.conn.lookupByName(vm_name)
        snapshots = []

        try:
            snap_names = domain.snapshotListNames()
        except libvirt.libvirtError:
            return []

        for snap_name in snap_names:
            try:
                snap = domain.snapshotLookupByName(snap_name)
                snap_xml = snap.getXMLDesc()

                # Parse XML for details
                import xml.etree.ElementTree as ET

                root = ET.fromstring(snap_xml)

                name = root.findtext("name", snap_name)
                description = root.findtext("description", "")
                creation_time = root.findtext("creationTime", "0")

                # Check for saved metadata
                metadata = self._load_snapshot_metadata(vm_name, name)

                snapshot = Snapshot(
                    name=name,
                    vm_name=vm_name,
                    snapshot_type=SnapshotType(
                        metadata.get("type", "disk") if metadata else "disk"
                    ),
                    state=SnapshotState.READY,
                    created_at=(
                        datetime.fromtimestamp(int(creation_time))
                        if creation_time != "0"
                        else datetime.now()
                    ),
                    description=description or None,
                    tags=metadata.get("tags", []) if metadata else [],
                    auto_created=metadata.get("auto_created", False) if metadata else False,
                    auto_policy=metadata.get("auto_policy") if metadata else None,
                    expires_at=(
                        datetime.fromisoformat(metadata["expires_at"])
                        if metadata and metadata.get("expires_at")
                        else None
                    ),
                )
                snapshots.append(snapshot)

            except Exception:
                continue

        return sorted(snapshots, key=lambda s: s.created_at, reverse=True)

    def get(self, vm_name: str, name: str) -> Optional[Snapshot]:
        """Get a specific snapshot."""
        snapshots = self.list(vm_name)
        for snap in snapshots:
            if snap.name == name:
                return snap
        return None

    def cleanup_expired(self, vm_name: str) -> List[str]:
        """Delete expired snapshots for a VM."""
        deleted = []
        for snapshot in self.list(vm_name):
            if snapshot.is_expired:
                try:
                    self.delete(vm_name, snapshot.name)
                    deleted.append(snapshot.name)
                except Exception:
                    pass
        return deleted

    def apply_policy(self, vm_name: str, policy: SnapshotPolicy) -> List[str]:
        """Apply retention policy to VM snapshots."""
        if not policy.auto_cleanup:
            return []

        snapshots = self.list(vm_name)
        auto_snapshots = [s for s in snapshots if s.auto_policy == policy.name]

        deleted = []

        # Sort by age (oldest first)
        auto_snapshots.sort(key=lambda s: s.created_at)

        # Delete if over max count
        while len(auto_snapshots) > policy.max_snapshots:
            if len(auto_snapshots) <= policy.min_snapshots:
                break
            oldest = auto_snapshots.pop(0)
            try:
                self.delete(vm_name, oldest.name)
                deleted.append(oldest.name)
            except Exception:
                pass

        # Delete if over max age
        max_age = timedelta(days=policy.max_age_days)
        for snap in auto_snapshots[:]:
            if snap.age > max_age:
                if len(auto_snapshots) <= policy.min_snapshots:
                    break
                try:
                    self.delete(vm_name, snap.name)
                    deleted.append(snap.name)
                    auto_snapshots.remove(snap)
                except Exception:
                    pass

        return deleted

    def create_auto_snapshot(
        self,
        vm_name: str,
        operation: str,
        policy: Optional[SnapshotPolicy] = None,
    ) -> Snapshot:
        """Create automatic snapshot before operation."""
        policy = policy or SnapshotPolicy(name="default")

        name = policy.generate_snapshot_name(operation)

        return self.create(
            vm_name=vm_name,
            name=name,
            description=f"Auto-snapshot before {operation}",
            snapshot_type=SnapshotType.DISK_ONLY,
            auto_policy=policy.name,
            expires_in_days=policy.max_age_days,
        )

    def _generate_snapshot_xml(
        self,
        name: str,
        description: Optional[str],
        snapshot_type: SnapshotType,
    ) -> str:
        """Generate libvirt snapshot XML."""
        desc_xml = f"<description>{description}</description>" if description else ""

        if snapshot_type == SnapshotType.DISK_ONLY:
            disks_xml = "<disks><disk name='vda' snapshot='internal'/></disks>"
        else:
            disks_xml = ""

        return f"""
        <domainsnapshot>
            <name>{name}</name>
            {desc_xml}
            {disks_xml}
        </domainsnapshot>
        """

    def _save_snapshot_metadata(self, snapshot: Snapshot) -> None:
        """Save snapshot metadata to disk."""
        vm_dir = self._snapshots_dir / snapshot.vm_name
        vm_dir.mkdir(parents=True, exist_ok=True)

        meta_file = vm_dir / f"{snapshot.name}.json"
        meta_file.write_text(json.dumps(snapshot.to_dict(), indent=2))

    def _load_snapshot_metadata(
        self, vm_name: str, name: str
    ) -> Optional[Dict[str, Any]]:
        """Load snapshot metadata from disk."""
        meta_file = self._snapshots_dir / vm_name / f"{name}.json"
        if meta_file.exists():
            try:
                return json.loads(meta_file.read_text())
            except Exception:
                return None
        return None

    def _delete_snapshot_metadata(self, vm_name: str, name: str) -> None:
        """Delete snapshot metadata from disk."""
        meta_file = self._snapshots_dir / vm_name / f"{name}.json"
        if meta_file.exists():
            meta_file.unlink()

    def close(self) -> None:
        if self._conn is not None:
            self._conn.close()
            self._conn = None
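A hedged sketch of the snapshot workflow this manager exposes, assuming libvirt-python is installed and a VM named "devbox" (an illustrative name) exists on qemu:///session:

    from clonebox.snapshots import SnapshotManager, SnapshotType

    mgr = SnapshotManager("qemu:///session")
    try:
        snap = mgr.create(
            vm_name="devbox",                       # hypothetical VM name
            name="before-upgrade",
            description="Manual checkpoint",
            snapshot_type=SnapshotType.DISK_ONLY,
            expires_in_days=7,                      # metadata-only expiry, used by cleanup_expired()
        )
        print(snap.name, snap.created_at.isoformat())

        for s in mgr.list("devbox"):
            print(s.name, s.snapshot_type.value, s.is_expired)

        # restore() refuses to run while the VM is active unless force=True.
        mgr.restore("devbox", "before-upgrade")
    finally:
        mgr.close()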
clonebox/snapshots/models.py  ADDED
@@ -0,0 +1,187 @@
#!/usr/bin/env python3
"""Data models for snapshot management."""

from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional


class SnapshotType(Enum):
    """Type of snapshot."""

    DISK_ONLY = "disk"     # Only disk state (offline)
    FULL = "full"          # Disk + memory + device state (online)
    EXTERNAL = "external"  # External snapshot file


class SnapshotState(Enum):
    """State of snapshot operation."""

    CREATING = "creating"
    READY = "ready"
    REVERTING = "reverting"
    DELETING = "deleting"
    FAILED = "failed"


@dataclass
class Snapshot:
    """Represents a VM snapshot."""

    name: str
    vm_name: str
    snapshot_type: SnapshotType
    state: SnapshotState
    created_at: datetime

    description: Optional[str] = None

    # Snapshot hierarchy
    parent_name: Optional[str] = None
    children: List[str] = field(default_factory=list)

    # Storage info
    disk_path: Optional[Path] = None
    memory_path: Optional[Path] = None
    size_bytes: int = 0

    # Metadata
    metadata: Dict[str, Any] = field(default_factory=dict)
    tags: List[str] = field(default_factory=list)

    # Auto-snapshot info
    auto_created: bool = False
    auto_policy: Optional[str] = None
    expires_at: Optional[datetime] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "name": self.name,
            "vm_name": self.vm_name,
            "type": self.snapshot_type.value,
            "state": self.state.value,
            "created_at": self.created_at.isoformat(),
            "description": self.description,
            "parent_name": self.parent_name,
            "children": self.children,
            "disk_path": str(self.disk_path) if self.disk_path else None,
            "memory_path": str(self.memory_path) if self.memory_path else None,
            "size_bytes": self.size_bytes,
            "metadata": self.metadata,
            "tags": self.tags,
            "auto_created": self.auto_created,
            "auto_policy": self.auto_policy,
            "expires_at": self.expires_at.isoformat() if self.expires_at else None,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Snapshot":
        """Create from dictionary."""
        return cls(
            name=data["name"],
            vm_name=data["vm_name"],
            snapshot_type=SnapshotType(data["type"]),
            state=SnapshotState(data["state"]),
            created_at=datetime.fromisoformat(data["created_at"]),
            description=data.get("description"),
            parent_name=data.get("parent_name"),
            children=data.get("children", []),
            disk_path=Path(data["disk_path"]) if data.get("disk_path") else None,
            memory_path=Path(data["memory_path"]) if data.get("memory_path") else None,
            size_bytes=data.get("size_bytes", 0),
            metadata=data.get("metadata", {}),
            tags=data.get("tags", []),
            auto_created=data.get("auto_created", False),
            auto_policy=data.get("auto_policy"),
            expires_at=(
                datetime.fromisoformat(data["expires_at"])
                if data.get("expires_at")
                else None
            ),
        )

    @property
    def is_expired(self) -> bool:
        """Check if snapshot has expired."""
        if self.expires_at is None:
            return False
        return datetime.now() > self.expires_at

    @property
    def age(self) -> timedelta:
        """Get snapshot age."""
        return datetime.now() - self.created_at


@dataclass
class SnapshotPolicy:
    """Policy for automatic snapshots."""

    name: str
    enabled: bool = True

    # Retention settings
    max_snapshots: int = 10
    max_age_days: int = 30
    min_snapshots: int = 1  # Keep at least N snapshots

    # Auto-snapshot triggers
    before_operations: List[str] = field(
        default_factory=lambda: ["upgrade", "resize", "config-change"]
    )
    scheduled_interval_hours: Optional[int] = None  # e.g., 24 for daily

    # Naming
    name_prefix: str = "auto-"
    include_timestamp: bool = True

    # Cleanup
    auto_cleanup: bool = True
    cleanup_on_success: bool = False  # Remove pre-operation snapshot if op succeeds

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return {
            "name": self.name,
            "enabled": self.enabled,
            "max_snapshots": self.max_snapshots,
            "max_age_days": self.max_age_days,
            "min_snapshots": self.min_snapshots,
            "before_operations": self.before_operations,
            "scheduled_interval_hours": self.scheduled_interval_hours,
            "name_prefix": self.name_prefix,
            "include_timestamp": self.include_timestamp,
            "auto_cleanup": self.auto_cleanup,
            "cleanup_on_success": self.cleanup_on_success,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SnapshotPolicy":
        """Create from dictionary."""
        return cls(
            name=data["name"],
            enabled=data.get("enabled", True),
            max_snapshots=data.get("max_snapshots", 10),
            max_age_days=data.get("max_age_days", 30),
            min_snapshots=data.get("min_snapshots", 1),
            before_operations=data.get(
                "before_operations", ["upgrade", "resize", "config-change"]
            ),
            scheduled_interval_hours=data.get("scheduled_interval_hours"),
            name_prefix=data.get("name_prefix", "auto-"),
            include_timestamp=data.get("include_timestamp", True),
            auto_cleanup=data.get("auto_cleanup", True),
            cleanup_on_success=data.get("cleanup_on_success", False),
        )

    def generate_snapshot_name(self, operation: Optional[str] = None) -> str:
        """Generate snapshot name based on policy."""
        parts = [self.name_prefix]
        if operation:
            parts.append(operation)
        if self.include_timestamp:
            parts.append(datetime.now().strftime("%Y%m%d-%H%M%S"))
        return "-".join(parts)
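A short sketch of how the retention policy names auto-snapshots and round-trips through to_dict()/from_dict(); the policy values are illustrative. Note that generate_snapshot_name joins the parts with "-" while the default prefix already ends in "-", so default names carry a double hyphen:

    from clonebox.snapshots.models import SnapshotPolicy

    policy = SnapshotPolicy(name="nightly", max_snapshots=5, max_age_days=14)
    print(policy.generate_snapshot_name("upgrade"))
    # e.g. "auto--upgrade-20250101-030000" (prefix, operation, timestamp)

    restored = SnapshotPolicy.from_dict(policy.to_dict())
    assert restored.max_snapshots == 5 and restored.auto_cleanup is True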
{clonebox-1.1.3.dist-info → clonebox-1.1.4.dist-info}/RECORD  CHANGED
@@ -1,6 +1,6 @@
 clonebox/__init__.py,sha256=CyfHVVq6KqBr4CNERBpXk_O6Q5B35q03YpdQbokVvvI,408
 clonebox/__main__.py,sha256=Fcoyzwwyz5-eC_sBlQk5a5RbKx8uodQz5sKJ190U0NU,135
-clonebox/cli.py,sha256=
+clonebox/cli.py,sha256=MeW_Jnbmrp9QrfA72a2pydeeINCY_LNBqJEGaD2rxtE,128687
 clonebox/cloner.py,sha256=2YQO4SHCv0xOsU1hL9IqdgmxxJN-2j75X9pe-LpTpJE,82696
 clonebox/container.py,sha256=tiYK1ZB-DhdD6A2FuMA0h_sRNkUI7KfYcJ0tFOcdyeM,6105
 clonebox/dashboard.py,sha256=dMY6odvPq3j6FronhRRsX7aY3qdCwznB-aCWKEmHDNw,5768
@@ -8,14 +8,20 @@ clonebox/detector.py,sha256=vS65cvFNPmUBCX1Y_TMTnSRljw6r1Ae9dlVtACs5XFc,23075
 clonebox/exporter.py,sha256=WIzVvmA0z_jjrpyXxvnXoLp9oaW6fKS7k0PGwzx_PIM,5629
 clonebox/importer.py,sha256=Q9Uk1IOA41mgGhU4ynW2k-h9GEoGxRKI3c9wWE4uxcA,7097
 clonebox/models.py,sha256=zwejkNtEEO_aPy_Q5UzXG5tszU-c7lkqh9LQus9eWMo,8307
+clonebox/monitor.py,sha256=KQKi63mcz6KULi2SpD5oi1g05CKaFTC2dAyyRJtJX-E,9211
 clonebox/p2p.py,sha256=LPQQ7wNO84yDnpVrGkaRU-FDUzqmC4URdZXVeHsNOew,5889
 clonebox/profiles.py,sha256=UP37fX_rhrG_O9ehNFJBUcULPmUtN1A8KsJ6cM44oK0,1986
 clonebox/validator.py,sha256=CF4hMlY69-AGRH5HdG8HAA9_LNCwDKD4xPlYQPWJ9Rw,36647
+clonebox/health/__init__.py,sha256=hW6MB8qc3pE-Jub1Djnz2G1AGs4Tn4Y2FbuYur6m8aE,394
+clonebox/health/models.py,sha256=sPumwj8S-88KgzSGw1Kq9bBbPVRd2RR0R87Z8hKJ_28,6001
+clonebox/snapshots/__init__.py,sha256=ndlrIavPAiA8z4Ep3-D_EPhOcjNKYFnP3rIpEKaGdb8,273
+clonebox/snapshots/manager.py,sha256=FuJB_q9fUs7GScVdX5vePezBDI9m8zwIrG1BDFvjeNM,11469
+clonebox/snapshots/models.py,sha256=upJhlHLYFWBrMzCMI8Zzd1z66JRV69R2qLDCTrDtJUY,6268
 clonebox/templates/profiles/ml-dev.yaml,sha256=w07MToGh31xtxpjbeXTBk9BkpAN8A3gv8HeA3ESKG9M,461
 clonebox/templates/profiles/web-stack.yaml,sha256=EBnnGMzML5vAjXmIUbCpbTCwmRaNJiuWd3EcL43DOK8,485
-clonebox-1.1.
-clonebox-1.1.
-clonebox-1.1.
-clonebox-1.1.
-clonebox-1.1.
-clonebox-1.1.
+clonebox-1.1.4.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+clonebox-1.1.4.dist-info/METADATA,sha256=TfNWk7EAyhCtPtEoyERrJz2f2Xhn6Lg08DjfC6zuj0E,47947
+clonebox-1.1.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+clonebox-1.1.4.dist-info/entry_points.txt,sha256=FES95Vi3btfViLEEoHdb8nikNxTqzaooi9ehZw9ZfWI,47
+clonebox-1.1.4.dist-info/top_level.txt,sha256=LdMo2cvCrEcRGH2M8JgQNVsCoszLV0xug6kx1JnaRjo,9
+clonebox-1.1.4.dist-info/RECORD,,

The remaining dist-info files (WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt) are unchanged between the two versions.