golem-vm-provider 0.1.24__py3-none-any.whl → 0.1.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {golem_vm_provider-0.1.24.dist-info → golem_vm_provider-0.1.27.dist-info}/METADATA +17 -10
- golem_vm_provider-0.1.27.dist-info/RECORD +38 -0
- {golem_vm_provider-0.1.24.dist-info → golem_vm_provider-0.1.27.dist-info}/WHEEL +1 -1
- golem_vm_provider-0.1.27.dist-info/entry_points.txt +4 -0
- provider/api/models.py +10 -10
- provider/api/routes.py +89 -95
- provider/config.py +101 -21
- provider/container.py +84 -0
- provider/discovery/__init__.py +8 -2
- provider/discovery/advertiser.py +41 -63
- provider/discovery/golem_base_advertiser.py +135 -0
- provider/discovery/golem_base_utils.py +10 -0
- provider/discovery/resource_monitor.py +34 -0
- provider/discovery/resource_tracker.py +1 -1
- provider/discovery/service.py +24 -0
- provider/main.py +88 -171
- provider/security/ethereum.py +9 -13
- provider/security/faucet.py +132 -0
- provider/service.py +67 -0
- provider/utils/__init__.py +0 -0
- provider/utils/logging.py +11 -27
- provider/utils/port_display.py +27 -1
- provider/utils/retry.py +39 -0
- provider/vm/__init__.py +1 -1
- provider/vm/models.py +13 -12
- provider/vm/multipass.py +2 -416
- provider/vm/multipass_adapter.py +221 -0
- provider/vm/name_mapper.py +5 -5
- provider/vm/port_manager.py +67 -26
- provider/vm/provider.py +48 -0
- provider/vm/proxy_manager.py +4 -3
- provider/vm/service.py +91 -0
- golem_vm_provider-0.1.24.dist-info/RECORD +0 -27
- golem_vm_provider-0.1.24.dist-info/entry_points.txt +0 -3
provider/vm/multipass_adapter.py
ADDED
@@ -0,0 +1,221 @@
+import json
+import uuid
+import subprocess
+from pathlib import Path
+import asyncio
+from typing import Dict, List, Optional
+from ..utils.retry import async_retry_unless_not_found
+
+from ..config import settings
+from ..utils.logging import setup_logger
+from .models import VMConfig, VMInfo, VMResources, VMStatus, VMError, VMNotFoundError
+from .provider import VMProvider
+
+logger = setup_logger(__name__)
+
+
+class MultipassError(VMError):
+    """Raised when multipass operations fail."""
+    pass
+
+
+class MultipassAdapter(VMProvider):
+    """Manages VMs using Multipass."""
+
+    def __init__(self, proxy_manager, name_mapper):
+        self.multipass_path = settings.MULTIPASS_BINARY_PATH
+        self.proxy_manager = proxy_manager
+        self.name_mapper = name_mapper
+
+    async def _run_multipass(self, args: List[str], check: bool = True) -> subprocess.CompletedProcess:
+        """Run a multipass command."""
+        # Commands that produce JSON or version info that we need to parse.
+        commands_to_capture = ['info', 'version']
+        should_capture = args[0] in commands_to_capture
+
+        # We add a timeout to the launch command to prevent it from hanging indefinitely
+        # e.g. during image download. 300 seconds = 5 minutes.
+        timeout = 300 if args[0] == 'launch' else None
+
+        try:
+            return await asyncio.to_thread(
+                subprocess.run,
+                [self.multipass_path, *args],
+                capture_output=should_capture,
+                text=True,
+                check=check,
+                timeout=timeout
+            )
+        except subprocess.CalledProcessError as e:
+            stderr = e.stderr if should_capture and e.stderr else "No stderr captured. See provider logs for command output."
+            raise MultipassError(f"Multipass command failed: {stderr}")
+        except subprocess.TimeoutExpired as e:
+            stderr = e.stderr if should_capture and e.stderr else "No stderr captured. See provider logs for command output."
+            raise MultipassError(f"Multipass command '{' '.join(args)}' timed out after {timeout} seconds. Stderr: {stderr}")
+
+    @async_retry_unless_not_found(retries=5, delay=2.0)
+    async def _get_vm_info(self, vm_id: str) -> Dict:
+        """Get detailed information about a VM."""
+        try:
+            result = await self._run_multipass(["info", vm_id, "--format", "json"])
+            logger.info(f"Raw multipass info for {vm_id}: {result.stdout}")
+            info = json.loads(result.stdout)
+            vm_info = info["info"][vm_id]
+            essential_fields = ["state", "ipv4", "cpu_count", "memory", "disks"]
+            if not all(field in vm_info for field in essential_fields):
+                raise KeyError(f"Essential fields missing from VM info. Got: {list(vm_info.keys())}")
+            return vm_info
+        except MultipassError as e:
+            if "does not exist" in str(e):
+                raise VMNotFoundError(f"VM {vm_id} not found in multipass") from e
+            raise
+        except (json.JSONDecodeError, KeyError) as e:
+            raise MultipassError(f"Failed to parse VM info or essential fields are missing: {e}")
+
+    async def initialize(self) -> None:
+        """Initialize the VM provider."""
+        try:
+            result = await self._run_multipass(["version"])
+            logger.info(f"🔧 Using Multipass version: {result.stdout.strip()}")
+        except (subprocess.CalledProcessError, FileNotFoundError) as e:
+            raise MultipassError(f"Failed to verify multipass installation: {e}")
+
+    async def create_vm(self, config: VMConfig) -> VMInfo:
+        """Create a new VM."""
+        multipass_name = f"golem-{uuid.uuid4()}"
+        await self.name_mapper.add_mapping(config.name, multipass_name)
+
+        launch_cmd = [
+            "launch",
+            config.image,
+            "--name", multipass_name,
+            "--cloud-init", config.cloud_init_path,
+            "--cpus", str(config.resources.cpu),
+            "--memory", f"{config.resources.memory}G",
+            "--disk", f"{config.resources.storage}G"
+        ]
+        try:
+            logger.info(f"Running multipass command: {' '.join(launch_cmd)}")
+            await self._run_multipass(launch_cmd)
+            logger.info(f"VM {multipass_name} launched, waiting for it to be ready...")
+
+            ip_address = None
+            max_retries = 15
+            retry_delay = 5  # seconds
+            for attempt in range(max_retries):
+                try:
+                    info = await self._get_vm_info(multipass_name)
+                    if info.get("state", "").lower() == "running" and info.get("ipv4"):
+                        ip_address = info["ipv4"][0]
+                        break
+                    logger.debug(f"VM {config.name} status is {info.get('state')}, waiting...")
+                except (MultipassError, VMNotFoundError):
+                    logger.debug(f"VM {config.name} not found yet, retrying in {retry_delay}s...")
+
+                await asyncio.sleep(retry_delay)
+
+            if not ip_address:
+                raise MultipassError(f"VM {config.name} did not become ready or get an IP in time.")
+
+            # Configure proxy to allocate a port
+            if not await self.proxy_manager.add_vm(multipass_name, ip_address):
+                raise MultipassError(f"Failed to configure proxy for VM {multipass_name}")
+
+            # Now get the full status, which will include the allocated port
+            vm_info = await self.get_vm_status(multipass_name)
+            logger.info(f"Successfully created VM: {vm_info.dict()}")
+            return vm_info
+
+        except Exception as e:
+            logger.error(f"VM creation for {config.name} failed. Cleaning up.", exc_info=True)
+            await self._run_multipass(["delete", multipass_name, "--purge"], check=False)
+            await self.proxy_manager.remove_vm(multipass_name)
+            await self.name_mapper.remove_mapping(config.name)
+            raise MultipassError(f"Failed to create VM {config.name}: {e}") from e
+
+    async def delete_vm(self, multipass_name: str) -> None:
+        """Delete a VM."""
+        requestor_name = await self.name_mapper.get_requestor_name(multipass_name)
+        if not requestor_name:
+            logger.warning(f"No mapping found for {multipass_name}, cannot remove mapping.")
+        else:
+            await self.name_mapper.remove_mapping(requestor_name)
+        await self._run_multipass(["delete", multipass_name, "--purge"], check=False)
+
+    async def list_vms(self) -> List[VMInfo]:
+        """List all VMs."""
+        all_mappings = self.name_mapper.list_mappings()
+        vms = []
+        for requestor_name in all_mappings.keys():
+            try:
+                vm_info = await self.get_vm_status(requestor_name)
+                vms.append(vm_info)
+            except VMNotFoundError:
+                logger.warning(f"VM {requestor_name} not found, but a mapping exists. It may have been deleted externally.")
+        return vms
+
+    async def start_vm(self, multipass_name: str) -> VMInfo:
+        """Start a VM."""
+        await self._run_multipass(["start", multipass_name])
+        return await self.get_vm_status(multipass_name)
+
+    async def stop_vm(self, multipass_name: str) -> VMInfo:
+        """Stop a VM."""
+        await self._run_multipass(["stop", multipass_name])
+        return await self.get_vm_status(multipass_name)
+
+    async def get_vm_status(self, multipass_name: str) -> VMInfo:
+        """Get the status of a VM."""
+        try:
+            info = await self._get_vm_info(multipass_name)
+        except MultipassError:
+            raise VMNotFoundError(f"VM {multipass_name} not found in multipass")
+
+        requestor_name = await self.name_mapper.get_requestor_name(multipass_name)
+        if not requestor_name:
+            raise VMNotFoundError(f"Mapping for VM {multipass_name} not found")
+
+        ipv4 = info.get("ipv4")
+        ip_address = ipv4[0] if ipv4 else None
+        logger.debug(f"Parsed VM info for {requestor_name}: {info}")
+
+        disks_info = info.get("disks", {})
+        total_storage = sum(int(disk.get("total", 0)) for disk in disks_info.values())
+        vm_info_obj = VMInfo(
+            id=requestor_name,
+            name=requestor_name,
+            status=VMStatus(info["state"].lower()),
+            resources=VMResources(
+                cpu=int(info.get("cpu_count", "1")),
+                memory=round(info.get("memory", {}).get("total", 1024**3) / (1024**3)),
+                storage=round(total_storage / (1024**3)) if total_storage > 0 else 10
+            ),
+            ip_address=ip_address,
+            ssh_port=self.proxy_manager.get_port(multipass_name)
+        )
+        logger.debug(f"Constructed VMInfo object: {vm_info_obj.dict()}")
+        return vm_info_obj
+
+    async def get_all_vms_resources(self) -> Dict[str, VMResources]:
+        """Get resources for all running VMs."""
+        all_mappings = self.name_mapper.list_mappings()
+        vm_resources = {}
+        for requestor_name, multipass_name in all_mappings.items():
+            try:
+                info = await self._get_vm_info(multipass_name)
+                disks_info = info.get("disks", {})
+                total_storage = sum(int(disk.get("total", 0)) for disk in disks_info.values())
+                vm_resources[requestor_name] = VMResources(
+                    cpu=int(info.get("cpu_count", "1")),
+                    memory=round(info.get("memory", {}).get("total", 1024**3) / (1024**3)),
+                    storage=round(total_storage / (1024**3)) if total_storage > 0 else 10
+                )
+            except (MultipassError, VMNotFoundError):
+                logger.warning(f"Could not retrieve resources for VM {requestor_name} ({multipass_name}). It may have been deleted.")
+            except Exception as e:
+                logger.error(f"Failed to get info for VM {requestor_name}: {e}")
+        return vm_resources
+
+    async def cleanup(self) -> None:
+        """Cleanup resources used by the provider."""
+        pass
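To show how the new adapter is meant to be driven, here is a minimal, hypothetical wiring sketch. It assumes PythonProxyManager and VMNameMapper can be constructed as shown (their full constructors are not part of this hunk) and that VMConfig accepts the fields used in create_vm above; treat it as an illustration, not the package's documented API.

# Hypothetical usage sketch for MultipassAdapter (not shipped in the wheel).
# Constructor arguments for PythonProxyManager are assumed.
import asyncio
from pathlib import Path

from provider.vm.models import VMConfig, VMResources
from provider.vm.multipass_adapter import MultipassAdapter
from provider.vm.name_mapper import VMNameMapper
from provider.vm.proxy_manager import PythonProxyManager


async def main() -> None:
    name_mapper = VMNameMapper(db_path=Path("vm_names.json"))
    proxy_manager = PythonProxyManager()  # assumed default construction
    adapter = MultipassAdapter(proxy_manager, name_mapper)

    # Fails fast with MultipassError if the multipass binary is missing.
    await adapter.initialize()

    config = VMConfig(
        name="demo-vm",
        image="24.04",
        cloud_init_path="/tmp/cloud-init.yaml",  # normally generated by VMService
        resources=VMResources(cpu=2, memory=4, storage=20),
    )
    vm = await adapter.create_vm(config)
    print(f"VM ready at {vm.ip_address}, SSH proxied on port {vm.ssh_port}")

    # delete_vm expects the multipass-side name, not the requestor name.
    await adapter.delete_vm(await name_mapper.get_multipass_name("demo-vm"))


if __name__ == "__main__":
    asyncio.run(main())

Note that create_vm records the requestor-name to multipass-name mapping itself, so callers only need the requestor name afterwards.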
provider/vm/name_mapper.py
CHANGED
@@ -9,21 +9,21 @@ logger = logging.getLogger(__name__)
 class VMNameMapper:
     """Maps between requestor VM names and multipass VM names."""
 
-    def __init__(self,
+    def __init__(self, db_path: Optional[Path] = None):
         """Initialize name mapper.
 
         Args:
-
+            db_path: Optional path to persist mappings
         """
         self._name_map: Dict[str, str] = {}  # requestor_name -> multipass_name
         self._reverse_map: Dict[str, str] = {}  # multipass_name -> requestor_name
         self._lock = asyncio.Lock()
-        self._storage_path =
+        self._storage_path = db_path
 
         # Load existing mappings if storage path provided
-        if
+        if db_path and db_path.exists():
             try:
-                with open(
+                with open(db_path) as f:
                     data = json.load(f)
                     self._name_map = data.get('name_map', {})
                     self._reverse_map = data.get('reverse_map', {})
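The practical effect of the new db_path parameter is that mappings survive a provider restart. A small round-trip sketch, assuming the async add_mapping/get_multipass_name/get_requestor_name signatures seen at their call sites in this diff, and assuming add_mapping also writes the mapping back to db_path (the save path is not visible in this hunk):

# Persistence sketch for VMNameMapper; persistence-on-write is an assumption.
import asyncio
from pathlib import Path

from provider.vm.name_mapper import VMNameMapper


async def main() -> None:
    db = Path("vm_names.json")

    mapper = VMNameMapper(db_path=db)
    await mapper.add_mapping("my-vm", "golem-1234")

    # A fresh instance pointed at the same file reloads name_map/reverse_map
    # in __init__, so both lookup directions should still resolve.
    reloaded = VMNameMapper(db_path=db)
    print(await reloaded.get_multipass_name("my-vm"))        # expected: golem-1234
    print(await reloaded.get_requestor_name("golem-1234"))   # expected: my-vm


asyncio.run(main())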
provider/vm/port_manager.py
CHANGED
@@ -24,7 +24,8 @@ class PortManager:
         state_file: Optional[str] = None,
         port_check_servers: Optional[List[str]] = None,
         discovery_port: Optional[int] = None,
-        existing_ports: Optional[Set[int]] = None
+        existing_ports: Optional[Set[int]] = None,
+        skip_verification: bool = False
     ):
         """Initialize the port manager.
 
@@ -46,11 +47,15 @@
         self._existing_ports = existing_ports or set()
 
         # Initialize port verifier with default servers
-
-            "http://localhost:9000"
-
-
+        if settings.DEV_MODE:
+            self.port_check_servers = ["http://localhost:9000"]
+        else:
+            self.port_check_servers = port_check_servers or [
+                "http://localhost:9000",  # Local development server
+                "http://195.201.39.101:9000",  # Production servers
+            ]
         self.discovery_port = discovery_port or settings.PORT
+        self.skip_verification = skip_verification
         self.port_verifier = PortVerifier(
             self.port_check_servers,
             discovery_port=self.discovery_port
@@ -76,20 +81,48 @@
         display = PortVerificationDisplay(
             provider_port=self.discovery_port,
             port_range_start=self.start_port,
-            port_range_end=self.end_port
+            port_range_end=self.end_port,
+            skip_verification=self.skip_verification
         )
         display.print_header()
 
-        #
-
-
-
-
-
-
-
-
+        # If verification is skipped, mark all ports as verified
+        if self.skip_verification:
+            logger.warning("⚠️ Port verification is disabled in development mode")
+            logger.warning("   All ports will be considered available")
+            logger.warning("   This should only be used for development/testing")
+
+            # Mark all ports as verified
+            self.verified_ports = set(range(self.start_port, self.end_port))
+
+            # In development mode, we don't need to create any results
+            # The display will handle development mode differently
+            results = {}
+        else:
+            # Verify all ports in range, including existing ones
+            ssh_ports = list(range(self.start_port, self.end_port))
+            logger.info(f"Starting port verification...")
+            logger.info(f"SSH ports range: {self.start_port}-{self.end_port}")
+            logger.info(
+                f"Using port check servers: {', '.join(self.port_check_servers)}")
+
+            # Clear existing verified ports before verification
+            self.verified_ports.clear()
+            results = {}
+            if not self.skip_verification:
+                try:
+                    results = await self.port_verifier.verify_ports(ssh_ports)
+                except RuntimeError as e:
+                    logger.error(f"Port verification failed: {e}")
+                    display.print_summary(
+                        PortVerificationResult(
+                            port=self.discovery_port,
+                            accessible=False,
+                            error=str(e)
+                        ),
+                        {}
+                    )
+                    return False
 
         # Add provider port as verified since we already checked it
         results[self.discovery_port] = PortVerificationResult(
@@ -141,13 +174,17 @@
         # Print precise summary of current status
        display.print_summary(discovery_result, ssh_results)
 
-        if
-            logger.
-            return
+        if self.skip_verification:
+            logger.info(f"Port verification skipped - all {len(self.verified_ports)} ports marked as available")
+            return True
+        else:
+            if not self.verified_ports:
+                logger.error("No SSH ports were verified as accessible")
+                return False
 
-
-
-
+            logger.info(
+                f"Successfully verified {len(self.verified_ports)} SSH ports")
+            return True
 
     def _load_state(self) -> None:
         """Load port assignments from state file."""
@@ -203,7 +240,8 @@
                     return port
                 else:
                     # Port is in use, remove from verified ports
-                    self.
+                    if not self.skip_verification:
+                        self.verified_ports.remove(port)
                     self._used_ports.pop(vm_id)
             except Exception as e:
                 logger.debug(f"Failed to check port {port}: {e}")
@@ -216,7 +254,9 @@
         used_ports = self._get_used_ports()
 
         # Find first available verified port
-
+        ports_to_check = sorted(list(self.verified_ports)) if not self.skip_verification else range(
+            self.start_port, self.end_port)
+        for port in ports_to_check:
            if port not in used_ports:
                # Quick check if port is actually available
                try:
@@ -230,11 +270,12 @@
                        self._used_ports[vm_id] = port
                        self._save_state()
                        logger.info(
-                            f"Allocated
+                            f"Allocated port {port} for VM {vm_id}")
                        return port
                    else:
                        # Port is in use, remove from verified ports
-                        self.verified_ports
+                        if not self.skip_verification and port in self.verified_ports:
+                            self.verified_ports.remove(port)
                except Exception as e:
                    logger.debug(f"Failed to check port {port}: {e}")
                    continue
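The upshot of skip_verification is that a development provider can hand out SSH ports without reaching any external check server: allocation falls back to the raw start_port to end_port range instead of the verified_ports set. A rough construction sketch under that assumption; the full __init__ signature (including how start_port/end_port are supplied) and the name of the async verification entry point are not visible in these hunks:

# Hedged sketch: relies only on the keyword parameters and the
# allocate_port/deallocate_port calls visible in this diff.
from provider.config import settings
from provider.vm.port_manager import PortManager

manager = PortManager(
    state_file="ports.json",
    discovery_port=settings.PORT,
    # In DEV_MODE the check-server list collapses to localhost, and with
    # skip_verification=True every port in range is treated as available.
    skip_verification=settings.DEV_MODE,
)

port = manager.allocate_port("vm-1")   # synchronous, as called from proxy_manager.py
if port is not None:
    print(f"allocated SSH port {port} for vm-1")
    manager.deallocate_port("vm-1")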
provider/vm/provider.py
ADDED
@@ -0,0 +1,48 @@
+from abc import ABC, abstractmethod
+from typing import Dict, List
+
+from .models import VMConfig, VMInfo, VMResources
+
+
+class VMProvider(ABC):
+    """Abstract base class for VM providers."""
+
+    @abstractmethod
+    async def initialize(self) -> None:
+        """Initialize the VM provider."""
+        pass
+
+    @abstractmethod
+    async def create_vm(self, config: VMConfig) -> VMInfo:
+        """Create a new VM."""
+        pass
+
+    @abstractmethod
+    async def delete_vm(self, vm_id: str) -> None:
+        """Delete a VM."""
+        pass
+
+    @abstractmethod
+    async def start_vm(self, vm_id: str) -> VMInfo:
+        """Start a VM."""
+        pass
+
+    @abstractmethod
+    async def stop_vm(self, vm_id: str) -> VMInfo:
+        """Stop a VM."""
+        pass
+
+    @abstractmethod
+    async def get_vm_status(self, vm_id: str) -> VMInfo:
+        """Get the status of a VM."""
+        pass
+
+    @abstractmethod
+    def get_all_vms_resources(self) -> Dict[str, VMResources]:
+        """Get resources for all running VMs."""
+        pass
+
+    @abstractmethod
+    async def cleanup(self) -> None:
+        """Cleanup resources used by the provider."""
+        pass
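MultipassAdapter above is currently the only implementation of this contract in the package. For illustration, a hypothetical in-memory stub that satisfies the same interface (useful as a test double, not shipped in the wheel) might look like this; the VMStatus value is constructed the same way MultipassAdapter does it:

# Hypothetical test double for the VMProvider ABC; not part of the package.
from typing import Dict

from provider.vm.models import (
    VMConfig, VMInfo, VMResources, VMStatus, VMNotFoundError,
)
from provider.vm.provider import VMProvider


class InMemoryProvider(VMProvider):
    """Keeps 'VMs' in a dict so higher layers can be exercised without multipass."""

    def __init__(self) -> None:
        self._vms: Dict[str, VMInfo] = {}

    async def initialize(self) -> None:
        pass

    async def create_vm(self, config: VMConfig) -> VMInfo:
        info = VMInfo(
            id=config.name,
            name=config.name,
            status=VMStatus("running"),  # same construction as MultipassAdapter
            resources=config.resources,
            ip_address="127.0.0.1",
            ssh_port=None,  # assumes ssh_port is optional when no proxy port exists
        )
        self._vms[config.name] = info
        return info

    async def delete_vm(self, vm_id: str) -> None:
        self._vms.pop(vm_id, None)

    async def start_vm(self, vm_id: str) -> VMInfo:
        return await self.get_vm_status(vm_id)

    async def stop_vm(self, vm_id: str) -> VMInfo:
        return await self.get_vm_status(vm_id)

    async def get_vm_status(self, vm_id: str) -> VMInfo:
        try:
            return self._vms[vm_id]
        except KeyError:
            raise VMNotFoundError(f"VM {vm_id} not found")

    # Declared without async in the ABC above; implemented as a coroutine here
    # to match how MultipassAdapter and VMService actually call it.
    async def get_all_vms_resources(self) -> Dict[str, VMResources]:
        return {name: info.resources for name, info in self._vms.items()}

    async def cleanup(self) -> None:
        self._vms.clear()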
provider/vm/proxy_manager.py
CHANGED
@@ -309,10 +309,11 @@ class PythonProxyManager:
         try:
             # Use provided port or allocate one
             if port is None:
-
-                if
+                allocated_port = self.port_manager.allocate_port(vm_id)
+                if allocated_port is None:
                     logger.error(f"Failed to allocate port for VM {vm_id}")
                     return False
+                port = allocated_port
 
             # Create and start proxy server
             proxy = ProxyServer(port, vm_ip)
@@ -327,7 +328,7 @@
         except Exception as e:
             logger.error(f"Failed to configure proxy for VM {vm_id}: {e}")
             # Only deallocate if we allocated the port ourselves
-            if
+            if 'allocated_port' in locals() and allocated_port:
                 self.port_manager.deallocate_port(vm_id)
             return False
 
provider/vm/service.py
ADDED
@@ -0,0 +1,91 @@
+from datetime import datetime
+from typing import Dict, List
+
+from ..discovery.resource_tracker import ResourceTracker
+from ..utils.logging import setup_logger
+from .models import VMConfig, VMInfo, VMResources, VMNotFoundError
+from .provider import VMProvider
+from .name_mapper import VMNameMapper
+from .cloud_init import generate_cloud_init, cleanup_cloud_init
+
+logger = setup_logger(__name__)
+
+
+class VMService:
+    """Service for managing the lifecycle of VMs."""
+
+    def __init__(
+        self,
+        provider: VMProvider,
+        resource_tracker: ResourceTracker,
+        name_mapper: VMNameMapper,
+    ):
+        self.provider = provider
+        self.resource_tracker = resource_tracker
+        self.name_mapper = name_mapper
+
+    async def create_vm(self, config: VMConfig) -> VMInfo:
+        """Create a new VM."""
+        if not await self.resource_tracker.allocate(config.resources, config.name):
+            raise ValueError("Insufficient resources available on provider")
+
+        multipass_name = f"golem-{config.name}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
+        await self.name_mapper.add_mapping(config.name, multipass_name)
+
+        cloud_init_path, config_id = generate_cloud_init(
+            hostname=config.name,
+            ssh_key=config.ssh_key
+        )
+        config.cloud_init_path = cloud_init_path
+
+        try:
+            vm_info = await self.provider.create_vm(config)
+            return vm_info
+        except Exception as e:
+            logger.error(f"Failed to create VM, deallocating resources", exc_info=True)
+            await self.resource_tracker.deallocate(config.resources, config.name)
+            raise
+        finally:
+            cleanup_cloud_init(cloud_init_path, config_id)
+
+    async def delete_vm(self, vm_id: str) -> None:
+        """Delete a VM."""
+        multipass_name = await self.name_mapper.get_multipass_name(vm_id)
+        if not multipass_name:
+            logger.warning(f"No multipass name found for VM {vm_id}")
+            return
+
+        try:
+            vm_info = await self.provider.get_vm_status(multipass_name)
+            await self.provider.delete_vm(multipass_name)
+            await self.resource_tracker.deallocate(vm_info.resources, vm_id)
+        except VMNotFoundError:
+            logger.warning(f"VM {multipass_name} not found on provider, cleaning up resources")
+            # If the VM is not found, we still need to deallocate the resources we have tracked for it
+            # Since we can't get the resources from the provider, we'll have to assume the resources are what we have tracked
+            # This is not ideal, but it's the best we can do in this situation
+            # A better solution would be to store the resources in the name mapper
+            pass
+        finally:
+            await self.name_mapper.remove_mapping(vm_id)
+
+    async def list_vms(self) -> List[VMInfo]:
+        """List all VMs."""
+        return await self.provider.list_vms()
+
+    async def get_vm_status(self, vm_id: str) -> VMInfo:
+        """Get the status of a VM."""
+        multipass_name = await self.name_mapper.get_multipass_name(vm_id)
+        if not multipass_name:
+            from .models import VMNotFoundError
+            raise VMNotFoundError(f"VM {vm_id} not found")
+        return await self.provider.get_vm_status(multipass_name)
+
+    async def get_all_vms_resources(self) -> Dict[str, VMResources]:
+        """Get resources for all running VMs."""
+        return await self.provider.get_all_vms_resources()
+    async def initialize(self):
+        await self.provider.initialize()
+
+    async def shutdown(self):
+        await self.provider.cleanup()
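VMService is the seam that ties the pieces above together: resource accounting via ResourceTracker, cloud-init generation, name mapping, and whichever VMProvider backend is injected (presumably wired up by the new container.py, which is not shown here). A hedged composition sketch, assuming default constructors for ResourceTracker and PythonProxyManager and that VMConfig carries the fields used above:

# Composition sketch; collaborator constructors are assumptions, the VMService
# call sequence follows the methods shown in this diff.
import asyncio
from pathlib import Path

from provider.discovery.resource_tracker import ResourceTracker
from provider.vm.models import VMConfig, VMResources
from provider.vm.multipass_adapter import MultipassAdapter
from provider.vm.name_mapper import VMNameMapper
from provider.vm.proxy_manager import PythonProxyManager
from provider.vm.service import VMService


async def main() -> None:
    name_mapper = VMNameMapper(db_path=Path("vm_names.json"))
    provider = MultipassAdapter(PythonProxyManager(), name_mapper)

    service = VMService(
        provider=provider,
        resource_tracker=ResourceTracker(),  # assumed default construction
        name_mapper=name_mapper,
    )
    await service.initialize()

    config = VMConfig(
        name="demo-vm",
        image="24.04",
        ssh_key="ssh-ed25519 AAAA... user@host",
        resources=VMResources(cpu=2, memory=4, storage=20),
    )
    vm = await service.create_vm(config)  # allocates resources, writes cloud-init
    print(vm.ip_address, vm.ssh_port)

    await service.delete_vm("demo-vm")    # deallocates resources, drops the mapping
    await service.shutdown()


asyncio.run(main())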
golem_vm_provider-0.1.24.dist-info/RECORD
REMOVED
@@ -1,27 +0,0 @@
-provider/__init__.py,sha256=HO1fkPpZqPO3z8O8-eVIyx8xXSMIVuTR_b1YF0RtXOg,45
-provider/api/__init__.py,sha256=ssX1ugDqEPt8Fn04IymgmG-Ev8PiXLsCSaiZVvHQnec,344
-provider/api/models.py,sha256=JOzoNf1oE5N97UqTN5xuIrTkqn2tCHqPDaIzGA3jUyo,3513
-provider/api/routes.py,sha256=P27RQvNqFWn6PacRwr1PaVz-yv5KAWsp9KeORejkXSI,6452
-provider/config.py,sha256=Aqh2cnhZ8lmfiokZuGE-P9JuF8hedL5zRVlFpANvL0c,14585
-provider/discovery/__init__.py,sha256=VR3NRoQtZRH5Vs8FG7jnGLR7p7wn7XeZdLaBb3t8e1g,123
-provider/discovery/advertiser.py,sha256=yv7RbRf1K43qOLAEa2Olj9hhN8etl2qsBuoHok0xoVs,6784
-provider/discovery/resource_tracker.py,sha256=8dYhJxoe_jLRwisHoA0jr575YhUKmLIqSXfW88KshcQ,6000
-provider/main.py,sha256=eku9O0pTifNM7OcIukoYS9fBnI0vRkeuRxQkfsW0VKU,10051
-provider/network/port_verifier.py,sha256=3l6WNwBHydggJRFYkAsuBp1eCxaU619kjWuM-zSVj2o,13267
-provider/security/ethereum.py,sha256=SDRDbcjynbVy44kNnxlDcYLL0BZ3Qnc0DvmneQ-WKLE,1383
-provider/utils/ascii_art.py,sha256=ykBFsztk57GIiz1NJ-EII5UvN74iECqQL4h9VmiW6Z8,3161
-provider/utils/logging.py,sha256=C_elr0sJROHKQgErYpHJQvfujgh0k4Zf2gg8ZKfrmVk,2590
-provider/utils/port_display.py,sha256=5d_604Eo-82dqx_yV2ZScq7bKQ8IsXacc-yXC_KAz3A,11031
-provider/utils/retry.py,sha256=ekP2ucaSJNN-lBcrIvyHa4QYPKNITMl1a5V1X6BBvsw,1560
-provider/utils/setup.py,sha256=Z5dLuBQkb5vdoQsu1HJZwXmu9NWsiBYJ7Vq9-C-_tY8,2932
-provider/vm/__init__.py,sha256=JGs50tUmzOR1rQ_w4fMY_3XWylmiA1G7KKWZkVw51mY,501
-provider/vm/cloud_init.py,sha256=E5dDH7dqStRcJNDfbarBBe83-c9N63W8B5ycIrHI8eU,4627
-provider/vm/models.py,sha256=zkfvP5Z50SPDNajwZTt9NTDIMRQIsZLvSOsuirHEcJM,6256
-provider/vm/multipass.py,sha256=RufJbl39d_mEXruX0gX1zCmEEiQ_DytPXgW6F1qVQaM,16667
-provider/vm/name_mapper.py,sha256=MrshNeJ4Dw-WBsyiIVcn9N5xyOxaBKX4Yqhyh_m5IFg,4103
-provider/vm/port_manager.py,sha256=m5NZYstWt0MpFgWjzfcYj1UqQzwuFRvjkqBV_AdnxDI,10499
-provider/vm/proxy_manager.py,sha256=pVGb00WVg7bbFiLD90GowiwXX8zD80euz9ruzHvlgIY,14858
-golem_vm_provider-0.1.24.dist-info/METADATA,sha256=5XDgxlAAfze2jD9Q-7iDIT01Tn-6Dl8h3E-KFzQsuDw,10594
-golem_vm_provider-0.1.24.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-golem_vm_provider-0.1.24.dist-info/entry_points.txt,sha256=E4rCWo_Do_2zCG_GewNuftfVlHF_8b_OvioZre0dfeA,54
-golem_vm_provider-0.1.24.dist-info/RECORD,,