vm-tool 1.0.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/README.md +5 -0
- examples/__init__.py +1 -0
- examples/cloud/README.md +3 -0
- examples/cloud/__init__.py +1 -0
- examples/cloud/ssh_identity_file.py +27 -0
- examples/cloud/ssh_password.py +27 -0
- examples/cloud/template_cloud_setup.py +36 -0
- examples/deploy_full_setup.py +44 -0
- examples/docker-compose.example.yml +47 -0
- examples/ec2-setup.sh +95 -0
- examples/github-actions-ec2.yml +245 -0
- examples/github-actions-full-setup.yml +58 -0
- examples/local/.keep +1 -0
- examples/local/README.md +3 -0
- examples/local/__init__.py +1 -0
- examples/local/template_local_setup.py +27 -0
- examples/production-deploy.sh +70 -0
- examples/rollback.sh +52 -0
- examples/setup.sh +52 -0
- examples/ssh_key_management.py +22 -0
- examples/version_check.sh +3 -0
- vm_tool/__init__.py +0 -0
- vm_tool/alerting.py +274 -0
- vm_tool/audit.py +118 -0
- vm_tool/backup.py +125 -0
- vm_tool/benchmarking.py +200 -0
- vm_tool/cli.py +761 -0
- vm_tool/cloud.py +125 -0
- vm_tool/completion.py +200 -0
- vm_tool/compliance.py +104 -0
- vm_tool/config.py +92 -0
- vm_tool/drift.py +98 -0
- vm_tool/generator.py +462 -0
- vm_tool/health.py +197 -0
- vm_tool/history.py +131 -0
- vm_tool/kubernetes.py +89 -0
- vm_tool/metrics.py +183 -0
- vm_tool/notifications.py +152 -0
- vm_tool/plugins.py +119 -0
- vm_tool/policy.py +197 -0
- vm_tool/rbac.py +140 -0
- vm_tool/recovery.py +169 -0
- vm_tool/reporting.py +218 -0
- vm_tool/runner.py +445 -0
- vm_tool/secrets.py +285 -0
- vm_tool/ssh.py +150 -0
- vm_tool/state.py +122 -0
- vm_tool/strategies/__init__.py +16 -0
- vm_tool/strategies/ab_testing.py +258 -0
- vm_tool/strategies/blue_green.py +227 -0
- vm_tool/strategies/canary.py +277 -0
- vm_tool/validation.py +267 -0
- vm_tool/vm_setup/cleanup.yml +27 -0
- vm_tool/vm_setup/docker/create_docker_service.yml +63 -0
- vm_tool/vm_setup/docker/docker_setup.yml +7 -0
- vm_tool/vm_setup/docker/install_docker_and_compose.yml +92 -0
- vm_tool/vm_setup/docker/login_to_docker_hub.yml +6 -0
- vm_tool/vm_setup/github/git_configuration.yml +68 -0
- vm_tool/vm_setup/inventory.yml +1 -0
- vm_tool/vm_setup/k8s.yml +15 -0
- vm_tool/vm_setup/main.yml +27 -0
- vm_tool/vm_setup/monitoring.yml +42 -0
- vm_tool/vm_setup/project_service.yml +17 -0
- vm_tool/vm_setup/push_code.yml +40 -0
- vm_tool/vm_setup/setup.yml +17 -0
- vm_tool/vm_setup/setup_project_env.yml +7 -0
- vm_tool/webhooks.py +83 -0
- vm_tool-1.0.32.dist-info/METADATA +213 -0
- vm_tool-1.0.32.dist-info/RECORD +73 -0
- vm_tool-1.0.32.dist-info/WHEEL +5 -0
- vm_tool-1.0.32.dist-info/entry_points.txt +2 -0
- vm_tool-1.0.32.dist-info/licenses/LICENSE +21 -0
- vm_tool-1.0.32.dist-info/top_level.txt +2 -0
vm_tool/backup.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
"""Backup and restore functionality for disaster recovery."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
import subprocess
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Dict, List, Optional
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class BackupManager:
    """Manages backups and restores for deployed services.

    Archives are created as tar.gz files on the remote host over SSH,
    copied into the local ``backup_dir`` via scp, and described by a JSON
    metadata file stored next to each archive.
    """

    def __init__(self, backup_dir: Optional[Path] = None):
        """Initialize the manager and ensure the backup directory exists.

        Args:
            backup_dir: Local directory for archives and metadata.
                Defaults to ``~/.vm_tool/backups``.
        """
        if backup_dir is None:
            backup_dir = Path.home() / ".vm_tool" / "backups"
        self.backup_dir = backup_dir
        self.backup_dir.mkdir(parents=True, exist_ok=True)

    def create_backup(
        self,
        host: str,
        user: str,
        paths: List[str],
        include_volumes: bool = False,
        include_db: bool = False,
    ) -> str:
        """Create a backup of specified paths on remote host.

        Args:
            host: Remote host name or address (reachable via ssh/scp).
            user: SSH user name.
            paths: Remote filesystem paths to include in the archive.
            include_volumes: Reserved flag; no volume handling is
                implemented in this method yet.
            include_db: Reserved flag; no database dump is implemented
                in this method yet.

        Returns:
            The generated backup id (a ``YYYYmmdd_HHMMSS`` timestamp).

        Raises:
            RuntimeError: If the remote tar command fails.
            subprocess.CalledProcessError: If the scp download fails.
            subprocess.TimeoutExpired: If a remote command times out.
        """
        import shlex  # local import: only needed here, to quote remote paths

        backup_id = datetime.now().strftime("%Y%m%d_%H%M%S")
        backup_path = self.backup_dir / f"{host}_{backup_id}.tar.gz"

        logger.info(f"Creating backup: {backup_id}")

        # Fix: quote every path so names containing spaces or shell
        # metacharacters cannot break — or inject into — the remote shell
        # command that ssh executes (previously joined unquoted).
        tar_paths = " ".join(shlex.quote(p) for p in paths)
        remote_backup = f"/tmp/backup_{backup_id}.tar.gz"  # nosec B108

        try:
            # Create the archive on the remote host.
            tar_cmd = f"tar -czf {remote_backup} {tar_paths}"
            result = subprocess.run(
                ["ssh", f"{user}@{host}", tar_cmd],
                capture_output=True,
                text=True,
                timeout=300,
            )

            if result.returncode != 0:
                raise RuntimeError(f"Backup creation failed: {result.stderr}")

            # Download the archive into the local backup directory.
            subprocess.run(
                ["scp", f"{user}@{host}:{remote_backup}", str(backup_path)],
                check=True,
                timeout=300,
            )

            # Remove the temporary remote archive. Deliberately best-effort
            # (no check=True): a leftover file in /tmp is not worth failing
            # an otherwise successful backup over.
            subprocess.run(["ssh", f"{user}@{host}", f"rm {remote_backup}"], timeout=30)

            # Persist metadata so list_backups() can describe this archive.
            metadata = {
                "id": backup_id,
                "host": host,
                "paths": paths,
                "timestamp": datetime.now().isoformat(),
                "size": backup_path.stat().st_size,
            }
            metadata_file = self.backup_dir / f"{host}_{backup_id}.json"
            with open(metadata_file, "w") as f:
                json.dump(metadata, f, indent=2)

            logger.info(f"✅ Backup created: {backup_id}")
            return backup_id

        except Exception as e:
            logger.error(f"Backup failed: {e}")
            raise

    def list_backups(self, host: Optional[str] = None) -> List[Dict]:
        """List available backups.

        Args:
            host: If given, only return backups taken from this host.

        Returns:
            Backup metadata dicts, newest first (by ``timestamp``).
        """
        backups = []
        for metadata_file in self.backup_dir.glob("*.json"):
            with open(metadata_file, "r") as f:
                backup = json.load(f)
            if host is None or backup["host"] == host:
                backups.append(backup)

        return sorted(backups, key=lambda x: x["timestamp"], reverse=True)

    def restore_backup(self, backup_id: str, host: str, user: str):
        """Restore a backup to the specified host.

        The archive is uploaded via scp and extracted at the filesystem
        root (``tar -xzf ... -C /``), recreating the absolute paths
        captured by create_backup().

        Args:
            backup_id: Id returned by create_backup().
            host: Target host name or address.
            user: SSH user name.

        Raises:
            FileNotFoundError: If no local archive matches ``backup_id``.
            subprocess.CalledProcessError: If upload or extraction fails.
            subprocess.TimeoutExpired: If a remote command times out.
        """
        # Find backup file
        backup_files = list(self.backup_dir.glob(f"*_{backup_id}.tar.gz"))
        if not backup_files:
            raise FileNotFoundError(f"Backup not found: {backup_id}")

        backup_file = backup_files[0]
        remote_backup = f"/tmp/restore_{backup_id}.tar.gz"  # nosec B108

        try:
            # Upload backup
            subprocess.run(
                ["scp", str(backup_file), f"{user}@{host}:{remote_backup}"],
                check=True,
                timeout=300,
            )

            # Extract on remote
            subprocess.run(
                ["ssh", f"{user}@{host}", f"tar -xzf {remote_backup} -C /"],
                check=True,
                timeout=300,
            )

            # Cleanup (best-effort, same rationale as in create_backup)
            subprocess.run(["ssh", f"{user}@{host}", f"rm {remote_backup}"], timeout=30)

            logger.info(f"✅ Backup restored: {backup_id}")

        except Exception as e:
            logger.error(f"Restore failed: {e}")
            raise
|
vm_tool/benchmarking.py
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
1
|
+
"""Performance benchmarking for deployments."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import time
|
|
5
|
+
from typing import Dict, Any, List, Optional
|
|
6
|
+
from dataclasses import dataclass, field
|
|
7
|
+
from statistics import mean, median, stdev
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class BenchmarkResult:
    """Result of a single benchmark run."""

    name: str  # benchmark identifier, e.g. "deployment_<host>"
    duration: float  # wall-clock duration in seconds
    success: bool  # whether the benchmarked operation completed
    metadata: Dict[str, Any] = field(default_factory=dict)  # extra context (host, endpoint, error, ...)


class PerformanceBenchmark:
    """Benchmark deployment performance.

    Accumulates BenchmarkResult entries across runs and can render them
    as a text report or compare them against a baseline duration.
    """

    def __init__(self):
        # Accumulated BenchmarkResult entries, in the order they were run.
        self.results = []

    def benchmark_deployment(self, host: str, compose_file: str) -> BenchmarkResult:
        """Benchmark a deployment.

        NOTE(review): the deployment step is currently a placeholder
        sleep — wire this to the real deploy routine before trusting
        the numbers.

        Args:
            host: Target host of the deployment.
            compose_file: Compose file used for the deployment.

        Returns:
            The recorded BenchmarkResult (also appended to ``results``).
        """
        logger.info(f"🏃 Benchmarking deployment to {host}")

        start_time = time.time()
        success = False
        error = None

        try:
            # Simulate deployment (would call actual deploy function)
            logger.info(" Running deployment...")
            time.sleep(0.1)  # Placeholder
            success = True
        except Exception as e:
            error = str(e)
            logger.error(f" Deployment failed: {e}")

        duration = time.time() - start_time

        result = BenchmarkResult(
            name=f"deployment_{host}",
            duration=duration,
            success=success,
            metadata={"host": host, "compose_file": compose_file, "error": error},
        )

        self.results.append(result)
        logger.info(f" Completed in {duration:.2f}s")

        return result

    def benchmark_health_check(
        self, host: str, endpoint: str = "/health"
    ) -> BenchmarkResult:
        """Benchmark health check response time.

        Args:
            host: Host to probe over HTTP.
            endpoint: Path of the health endpoint.

        Returns:
            The recorded BenchmarkResult (also appended to ``results``).
        """
        logger.info(f"🏃 Benchmarking health check on {host}")

        start_time = time.time()
        success = False

        try:
            from vm_tool.health import check_http

            success = check_http(f"http://{host}{endpoint}")
        except Exception as e:
            logger.error(f" Health check failed: {e}")

        duration = time.time() - start_time

        result = BenchmarkResult(
            name=f"health_check_{host}",
            duration=duration,
            success=success,
            metadata={"host": host, "endpoint": endpoint},
        )

        self.results.append(result)
        logger.info(f" Response time: {duration*1000:.2f}ms")

        return result

    def run_load_test(
        self, host: str, requests: int = 100, concurrent: int = 10
    ) -> Dict[str, Any]:
        """Run load test against host.

        Args:
            host: Host hit with GET requests at "/".
            requests: Total number of requests to issue.
            concurrent: Worker-thread pool size.

        Returns:
            Aggregate statistics: request counts, success rate (percent),
            and mean/median/min/max/std-dev response times in seconds.
        """
        logger.info(
            f"🏃 Running load test: {requests} requests, {concurrent} concurrent"
        )

        # Bug fix: the original `import concurrent.futures` rebound the
        # local name `concurrent`, shadowing the pool-size parameter, so
        # max_workers received a module object and ThreadPoolExecutor
        # raised TypeError. A from-import leaves the parameter intact.
        from concurrent.futures import ThreadPoolExecutor, as_completed

        durations = []
        successes = 0
        failures = 0

        def make_request():
            # One GET against the host root; returns (elapsed_seconds, ok).
            start = time.time()
            try:
                from vm_tool.health import check_http

                if check_http(f"http://{host}/"):
                    return time.time() - start, True
            except Exception:
                # Fix: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Any request error simply
                # counts as a failed request.
                pass
            return time.time() - start, False

        with ThreadPoolExecutor(max_workers=concurrent) as executor:
            futures = [executor.submit(make_request) for _ in range(requests)]

            for future in as_completed(futures):
                duration, success = future.result()
                durations.append(duration)
                if success:
                    successes += 1
                else:
                    failures += 1

        results = {
            "total_requests": requests,
            "successful": successes,
            "failed": failures,
            "success_rate": (successes / requests * 100) if requests > 0 else 0,
            "avg_response_time": mean(durations) if durations else 0,
            "median_response_time": median(durations) if durations else 0,
            "min_response_time": min(durations) if durations else 0,
            "max_response_time": max(durations) if durations else 0,
            "std_dev": stdev(durations) if len(durations) > 1 else 0,
        }

        logger.info(f" Success rate: {results['success_rate']:.1f}%")
        logger.info(f" Avg response: {results['avg_response_time']*1000:.2f}ms")

        return results

    def generate_report(self) -> str:
        """Generate benchmark report.

        Returns:
            Human-readable multi-line summary of all recorded results,
            or a short notice when nothing has been benchmarked yet.
        """
        if not self.results:
            return "No benchmark results available"

        successful = [r for r in self.results if r.success]
        failed = [r for r in self.results if not r.success]

        report = f"""
Performance Benchmark Report
===========================
Total Benchmarks: {len(self.results)}
Successful: {len(successful)}
Failed: {len(failed)}

Results:
"""

        for result in self.results:
            status = "✅" if result.success else "❌"
            report += f"\n{status} {result.name}: {result.duration:.3f}s"

        if successful:
            durations = [r.duration for r in successful]
            report += f"""

Statistics (successful runs):
Average: {mean(durations):.3f}s
Median: {median(durations):.3f}s
Min: {min(durations):.3f}s
Max: {max(durations):.3f}s
"""
            if len(durations) > 1:
                report += f" Std Dev: {stdev(durations):.3f}s\n"

        return report

    def compare_with_baseline(self, baseline_duration: float) -> Dict[str, Any]:
        """Compare current results with baseline.

        Args:
            baseline_duration: Reference duration in seconds.

        Returns:
            Dict with the average successful duration, the absolute and
            percentage difference from the baseline, and an ``improved``
            flag — or an ``error`` key when there is nothing to compare.
        """
        if not self.results:
            return {"error": "No results to compare"}

        successful = [r for r in self.results if r.success]
        if not successful:
            return {"error": "No successful results"}

        avg_duration = mean([r.duration for r in successful])
        difference = avg_duration - baseline_duration
        percentage_change = (
            (difference / baseline_duration * 100) if baseline_duration > 0 else 0
        )

        return {
            "baseline_duration": baseline_duration,
            "current_avg_duration": avg_duration,
            "difference": difference,
            "percentage_change": percentage_change,
            "improved": difference < 0,
        }
|