golem-vm-provider 0.1.26__py3-none-any.whl → 0.1.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {golem_vm_provider-0.1.26.dist-info → golem_vm_provider-0.1.28.dist-info}/METADATA +7 -2
- golem_vm_provider-0.1.28.dist-info/RECORD +38 -0
- {golem_vm_provider-0.1.26.dist-info → golem_vm_provider-0.1.28.dist-info}/entry_points.txt +1 -0
- provider/api/models.py +7 -7
- provider/api/routes.py +89 -95
- provider/config.py +50 -28
- provider/container.py +84 -0
- provider/discovery/__init__.py +8 -2
- provider/discovery/advertiser.py +41 -63
- provider/discovery/golem_base_advertiser.py +12 -6
- provider/discovery/resource_monitor.py +34 -0
- provider/discovery/resource_tracker.py +1 -1
- provider/discovery/service.py +24 -0
- provider/main.py +56 -155
- provider/service.py +67 -0
- provider/utils/__init__.py +0 -0
- provider/utils/logging.py +11 -27
- provider/utils/port_display.py +6 -8
- provider/utils/retry.py +39 -0
- provider/vm/__init__.py +1 -1
- provider/vm/models.py +8 -7
- provider/vm/multipass.py +2 -420
- provider/vm/multipass_adapter.py +221 -0
- provider/vm/name_mapper.py +5 -5
- provider/vm/port_manager.py +24 -6
- provider/vm/provider.py +48 -0
- provider/vm/proxy_manager.py +1 -1
- provider/vm/service.py +91 -0
- golem_vm_provider-0.1.26.dist-info/RECORD +0 -30
- {golem_vm_provider-0.1.26.dist-info → golem_vm_provider-0.1.28.dist-info}/WHEEL +0 -0
@@ -0,0 +1,221 @@
|
|
1
|
+
import json
|
2
|
+
import uuid
|
3
|
+
import subprocess
|
4
|
+
from pathlib import Path
|
5
|
+
import asyncio
|
6
|
+
from typing import Dict, List, Optional
|
7
|
+
from ..utils.retry import async_retry_unless_not_found
|
8
|
+
|
9
|
+
from ..config import settings
|
10
|
+
from ..utils.logging import setup_logger
|
11
|
+
from .models import VMConfig, VMInfo, VMResources, VMStatus, VMError, VMNotFoundError
|
12
|
+
from .provider import VMProvider
|
13
|
+
|
14
|
+
logger = setup_logger(__name__)
|
15
|
+
|
16
|
+
|
17
|
+
class MultipassError(VMError):
    """Raised when a multipass operation fails (bad exit status, timeout, or unparseable output)."""
|
20
|
+
|
21
|
+
|
22
|
+
class MultipassAdapter(VMProvider):
    """Manages VMs using Multipass.

    Implements the :class:`VMProvider` interface by shelling out to the
    ``multipass`` CLI. Requestor-visible VM names are mapped to unique
    multipass instance names via ``name_mapper``; SSH reachability is
    configured through ``proxy_manager``.
    """

    def __init__(self, proxy_manager, name_mapper):
        # Path to the multipass binary, taken from provider settings.
        self.multipass_path = settings.MULTIPASS_BINARY_PATH
        self.proxy_manager = proxy_manager
        self.name_mapper = name_mapper

    async def _run_multipass(self, args: List[str], check: bool = True) -> subprocess.CompletedProcess:
        """Run a multipass command in a worker thread.

        Args:
            args: CLI arguments, excluding the binary path itself.
            check: When True, a non-zero exit status raises.

        Returns:
            The completed process. stdout/stderr are captured only for
            commands whose output we parse ('info', 'version').

        Raises:
            MultipassError: If the command fails or times out.
        """
        # Commands that produce JSON or version info that we need to parse.
        commands_to_capture = ['info', 'version']
        should_capture = args[0] in commands_to_capture

        # We add a timeout to the launch command to prevent it from hanging indefinitely
        # e.g. during image download. 300 seconds = 5 minutes.
        timeout = 300 if args[0] == 'launch' else None

        try:
            # subprocess.run is blocking; run it in a thread so the event loop stays free.
            return await asyncio.to_thread(
                subprocess.run,
                [self.multipass_path, *args],
                capture_output=should_capture,
                text=True,
                check=check,
                timeout=timeout
            )
        except subprocess.CalledProcessError as e:
            stderr = e.stderr if should_capture and e.stderr else "No stderr captured. See provider logs for command output."
            raise MultipassError(f"Multipass command failed: {stderr}")
        except subprocess.TimeoutExpired as e:
            stderr = e.stderr if should_capture and e.stderr else "No stderr captured. See provider logs for command output."
            raise MultipassError(f"Multipass command '{' '.join(args)}' timed out after {timeout} seconds. Stderr: {stderr}")

    @async_retry_unless_not_found(retries=5, delay=2.0)
    async def _get_vm_info(self, vm_id: str) -> Dict:
        """Get detailed information about a VM.

        Args:
            vm_id: The multipass instance name.

        Returns:
            The parsed ``multipass info`` JSON entry for the instance,
            guaranteed to contain state, ipv4, cpu_count, memory and disks.

        Raises:
            VMNotFoundError: If multipass reports the instance does not exist.
            MultipassError: On any other failure or unparseable output.
        """
        try:
            result = await self._run_multipass(["info", vm_id, "--format", "json"])
            logger.info(f"Raw multipass info for {vm_id}: {result.stdout}")
            info = json.loads(result.stdout)
            vm_info = info["info"][vm_id]
            # Fail fast if multipass returned a partial record (can happen while
            # an instance is still starting).
            essential_fields = ["state", "ipv4", "cpu_count", "memory", "disks"]
            if not all(field in vm_info for field in essential_fields):
                raise KeyError(f"Essential fields missing from VM info. Got: {list(vm_info.keys())}")
            return vm_info
        except MultipassError as e:
            if "does not exist" in str(e):
                raise VMNotFoundError(f"VM {vm_id} not found in multipass") from e
            raise
        except (json.JSONDecodeError, KeyError) as e:
            raise MultipassError(f"Failed to parse VM info or essential fields are missing: {e}")

    def _parse_resources(self, info: Dict) -> VMResources:
        """Build a VMResources object from a multipass info record.

        Shared by get_vm_status() and get_all_vms_resources() so the
        unit conversions stay in one place. Sizes are reported by
        multipass in bytes and converted to whole GiB.
        """
        disks_info = info.get("disks", {})
        total_storage = sum(int(disk.get("total", 0)) for disk in disks_info.values())
        return VMResources(
            cpu=int(info.get("cpu_count", "1")),
            memory=round(info.get("memory", {}).get("total", 1024**3) / (1024**3)),
            # Fall back to 10 GiB when multipass reports no disk totals.
            storage=round(total_storage / (1024**3)) if total_storage > 0 else 10
        )

    async def initialize(self) -> None:
        """Initialize the VM provider by verifying the multipass binary works."""
        try:
            result = await self._run_multipass(["version"])
            logger.info(f"🔧 Using Multipass version: {result.stdout.strip()}")
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
            raise MultipassError(f"Failed to verify multipass installation: {e}")

    async def create_vm(self, config: VMConfig) -> VMInfo:
        """Create a new VM.

        Launches a multipass instance with a unique name, waits for it to
        reach the running state with an IPv4 address, then registers it with
        the proxy. On any failure the instance, proxy entry and name mapping
        are cleaned up before re-raising as MultipassError.
        """
        multipass_name = f"golem-{uuid.uuid4()}"
        await self.name_mapper.add_mapping(config.name, multipass_name)

        launch_cmd = [
            "launch",
            config.image,
            "--name", multipass_name,
            "--cloud-init", config.cloud_init_path,
            "--cpus", str(config.resources.cpu),
            "--memory", f"{config.resources.memory}G",
            "--disk", f"{config.resources.storage}G"
        ]
        try:
            logger.info(f"Running multipass command: {' '.join(launch_cmd)}")
            await self._run_multipass(launch_cmd)
            logger.info(f"VM {multipass_name} launched, waiting for it to be ready...")

            # Poll until the instance is running and has an address; launch
            # returning does not guarantee either.
            ip_address = None
            max_retries = 15
            retry_delay = 5  # seconds
            for attempt in range(max_retries):
                try:
                    info = await self._get_vm_info(multipass_name)
                    if info.get("state", "").lower() == "running" and info.get("ipv4"):
                        ip_address = info["ipv4"][0]
                        break
                    logger.debug(f"VM {config.name} status is {info.get('state')}, waiting...")
                except (MultipassError, VMNotFoundError):
                    logger.debug(f"VM {config.name} not found yet, retrying in {retry_delay}s...")

                await asyncio.sleep(retry_delay)

            if not ip_address:
                raise MultipassError(f"VM {config.name} did not become ready or get an IP in time.")

            # Configure proxy to allocate a port
            if not await self.proxy_manager.add_vm(multipass_name, ip_address):
                raise MultipassError(f"Failed to configure proxy for VM {multipass_name}")

            # Now get the full status, which will include the allocated port
            vm_info = await self.get_vm_status(multipass_name)
            logger.info(f"Successfully created VM: {vm_info.dict()}")
            return vm_info

        except Exception as e:
            # Best-effort cleanup: purge the instance, drop the proxy entry
            # and remove the name mapping before surfacing the error.
            logger.error(f"VM creation for {config.name} failed. Cleaning up.", exc_info=True)
            await self._run_multipass(["delete", multipass_name, "--purge"], check=False)
            await self.proxy_manager.remove_vm(multipass_name)
            await self.name_mapper.remove_mapping(config.name)
            raise MultipassError(f"Failed to create VM {config.name}: {e}") from e

    async def delete_vm(self, multipass_name: str) -> None:
        """Delete a VM and remove its name mapping (best effort)."""
        requestor_name = await self.name_mapper.get_requestor_name(multipass_name)
        if not requestor_name:
            logger.warning(f"No mapping found for {multipass_name}, cannot remove mapping.")
        else:
            await self.name_mapper.remove_mapping(requestor_name)
        # check=False: deleting an already-gone instance should not raise.
        await self._run_multipass(["delete", multipass_name, "--purge"], check=False)

    async def list_vms(self) -> List[VMInfo]:
        """List all VMs that have a known name mapping."""
        all_mappings = self.name_mapper.list_mappings()
        vms = []
        # Fix: get_vm_status() expects the multipass instance name, but the
        # original passed the requestor name (the mapping key). Iterate the
        # mapping pairs and query by the multipass name, as
        # get_all_vms_resources() already does.
        for requestor_name, multipass_name in all_mappings.items():
            try:
                vm_info = await self.get_vm_status(multipass_name)
                vms.append(vm_info)
            except VMNotFoundError:
                logger.warning(f"VM {requestor_name} not found, but a mapping exists. It may have been deleted externally.")
        return vms

    async def start_vm(self, multipass_name: str) -> VMInfo:
        """Start a VM."""
        await self._run_multipass(["start", multipass_name])
        return await self.get_vm_status(multipass_name)

    async def stop_vm(self, multipass_name: str) -> VMInfo:
        """Stop a VM."""
        await self._run_multipass(["stop", multipass_name])
        return await self.get_vm_status(multipass_name)

    async def get_vm_status(self, multipass_name: str) -> VMInfo:
        """Get the status of a VM.

        Args:
            multipass_name: The multipass instance name.

        Returns:
            A VMInfo keyed by the requestor-visible name, including the
            SSH port allocated by the proxy manager.

        Raises:
            VMNotFoundError: If the instance or its name mapping is missing.
        """
        try:
            info = await self._get_vm_info(multipass_name)
        except MultipassError:
            raise VMNotFoundError(f"VM {multipass_name} not found in multipass")

        requestor_name = await self.name_mapper.get_requestor_name(multipass_name)
        if not requestor_name:
            raise VMNotFoundError(f"Mapping for VM {multipass_name} not found")

        ipv4 = info.get("ipv4")
        ip_address = ipv4[0] if ipv4 else None
        logger.debug(f"Parsed VM info for {requestor_name}: {info}")

        vm_info_obj = VMInfo(
            id=requestor_name,
            name=requestor_name,
            status=VMStatus(info["state"].lower()),
            resources=self._parse_resources(info),
            ip_address=ip_address,
            ssh_port=self.proxy_manager.get_port(multipass_name)
        )
        logger.debug(f"Constructed VMInfo object: {vm_info_obj.dict()}")
        return vm_info_obj

    async def get_all_vms_resources(self) -> Dict[str, VMResources]:
        """Get resources for all running VMs, keyed by requestor name.

        VMs that no longer exist (or fail to report) are logged and skipped
        rather than failing the whole scan.
        """
        all_mappings = self.name_mapper.list_mappings()
        vm_resources = {}
        for requestor_name, multipass_name in all_mappings.items():
            try:
                info = await self._get_vm_info(multipass_name)
                vm_resources[requestor_name] = self._parse_resources(info)
            except (MultipassError, VMNotFoundError):
                logger.warning(f"Could not retrieve resources for VM {requestor_name} ({multipass_name}). It may have been deleted.")
            except Exception as e:
                logger.error(f"Failed to get info for VM {requestor_name}: {e}")
        return vm_resources

    async def cleanup(self) -> None:
        """Cleanup resources used by the provider. Multipass needs none."""
        pass
|
provider/vm/name_mapper.py
CHANGED
@@ -9,21 +9,21 @@ logger = logging.getLogger(__name__)
|
|
9
9
|
class VMNameMapper:
|
10
10
|
"""Maps between requestor VM names and multipass VM names."""
|
11
11
|
|
12
|
-
def __init__(self,
|
12
|
+
def __init__(self, db_path: Optional[Path] = None):
|
13
13
|
"""Initialize name mapper.
|
14
14
|
|
15
15
|
Args:
|
16
|
-
|
16
|
+
db_path: Optional path to persist mappings
|
17
17
|
"""
|
18
18
|
self._name_map: Dict[str, str] = {} # requestor_name -> multipass_name
|
19
19
|
self._reverse_map: Dict[str, str] = {} # multipass_name -> requestor_name
|
20
20
|
self._lock = asyncio.Lock()
|
21
|
-
self._storage_path =
|
21
|
+
self._storage_path = db_path
|
22
22
|
|
23
23
|
# Load existing mappings if storage path provided
|
24
|
-
if
|
24
|
+
if db_path and db_path.exists():
|
25
25
|
try:
|
26
|
-
with open(
|
26
|
+
with open(db_path) as f:
|
27
27
|
data = json.load(f)
|
28
28
|
self._name_map = data.get('name_map', {})
|
29
29
|
self._reverse_map = data.get('reverse_map', {})
|
provider/vm/port_manager.py
CHANGED
@@ -47,10 +47,13 @@ class PortManager:
|
|
47
47
|
self._existing_ports = existing_ports or set()
|
48
48
|
|
49
49
|
# Initialize port verifier with default servers
|
50
|
-
|
51
|
-
"http://localhost:9000"
|
52
|
-
|
53
|
-
|
50
|
+
if settings.DEV_MODE:
|
51
|
+
self.port_check_servers = ["http://localhost:9000"]
|
52
|
+
else:
|
53
|
+
self.port_check_servers = port_check_servers or [
|
54
|
+
"http://localhost:9000", # Local development server
|
55
|
+
"http://195.201.39.101:9000", # Production servers
|
56
|
+
]
|
54
57
|
self.discovery_port = discovery_port or settings.PORT
|
55
58
|
self.skip_verification = skip_verification
|
56
59
|
self.port_verifier = PortVerifier(
|
@@ -105,7 +108,21 @@ class PortManager:
|
|
105
108
|
|
106
109
|
# Clear existing verified ports before verification
|
107
110
|
self.verified_ports.clear()
|
108
|
-
results =
|
111
|
+
results = {}
|
112
|
+
if not self.skip_verification:
|
113
|
+
try:
|
114
|
+
results = await self.port_verifier.verify_ports(ssh_ports)
|
115
|
+
except RuntimeError as e:
|
116
|
+
logger.error(f"Port verification failed: {e}")
|
117
|
+
display.print_summary(
|
118
|
+
PortVerificationResult(
|
119
|
+
port=self.discovery_port,
|
120
|
+
accessible=False,
|
121
|
+
error=str(e)
|
122
|
+
),
|
123
|
+
{}
|
124
|
+
)
|
125
|
+
return False
|
109
126
|
|
110
127
|
# Add provider port as verified since we already checked it
|
111
128
|
results[self.discovery_port] = PortVerificationResult(
|
@@ -237,7 +254,8 @@ class PortManager:
|
|
237
254
|
used_ports = self._get_used_ports()
|
238
255
|
|
239
256
|
# Find first available verified port
|
240
|
-
ports_to_check = sorted(self.verified_ports) if not self.skip_verification else range(
|
257
|
+
ports_to_check = sorted(list(self.verified_ports)) if not self.skip_verification else range(
|
258
|
+
self.start_port, self.end_port)
|
241
259
|
for port in ports_to_check:
|
242
260
|
if port not in used_ports:
|
243
261
|
# Quick check if port is actually available
|
provider/vm/provider.py
ADDED
@@ -0,0 +1,48 @@
|
|
1
|
+
from abc import ABC, abstractmethod
|
2
|
+
from typing import Dict, List
|
3
|
+
|
4
|
+
from .models import VMConfig, VMInfo, VMResources
|
5
|
+
|
6
|
+
|
7
|
+
class VMProvider(ABC):
    """Abstract base class for VM providers.

    Defines the async lifecycle contract that concrete backends (e.g. the
    multipass adapter) must implement. All methods are coroutines; callers
    such as VMService await every one of them.
    """

    @abstractmethod
    async def initialize(self) -> None:
        """Initialize the VM provider."""

    @abstractmethod
    async def create_vm(self, config: VMConfig) -> VMInfo:
        """Create a new VM."""

    @abstractmethod
    async def delete_vm(self, vm_id: str) -> None:
        """Delete a VM."""

    @abstractmethod
    async def start_vm(self, vm_id: str) -> VMInfo:
        """Start a VM."""

    @abstractmethod
    async def stop_vm(self, vm_id: str) -> VMInfo:
        """Stop a VM."""

    @abstractmethod
    async def get_vm_status(self, vm_id: str) -> VMInfo:
        """Get the status of a VM."""

    # Fix: declared as a plain `def` although the concrete implementation is
    # async and callers await it (VMService does
    # `await self.provider.get_all_vms_resources()`); declare it async so the
    # abstract contract matches actual usage.
    @abstractmethod
    async def get_all_vms_resources(self) -> Dict[str, VMResources]:
        """Get resources for all running VMs."""

    @abstractmethod
    async def cleanup(self) -> None:
        """Cleanup resources used by the provider."""
|
provider/vm/proxy_manager.py
CHANGED
@@ -308,7 +308,7 @@ class PythonProxyManager:
|
|
308
308
|
"""
|
309
309
|
try:
|
310
310
|
# Use provided port or allocate one
|
311
|
-
if port is None
|
311
|
+
if port is None:
|
312
312
|
allocated_port = self.port_manager.allocate_port(vm_id)
|
313
313
|
if allocated_port is None:
|
314
314
|
logger.error(f"Failed to allocate port for VM {vm_id}")
|
provider/vm/service.py
ADDED
@@ -0,0 +1,91 @@
|
|
1
|
+
from datetime import datetime
|
2
|
+
from typing import Dict, List
|
3
|
+
|
4
|
+
from ..discovery.resource_tracker import ResourceTracker
|
5
|
+
from ..utils.logging import setup_logger
|
6
|
+
from .models import VMConfig, VMInfo, VMResources, VMNotFoundError
|
7
|
+
from .provider import VMProvider
|
8
|
+
from .name_mapper import VMNameMapper
|
9
|
+
from .cloud_init import generate_cloud_init, cleanup_cloud_init
|
10
|
+
|
11
|
+
logger = setup_logger(__name__)
|
12
|
+
|
13
|
+
|
14
|
+
class VMService:
    """Service for managing the lifecycle of VMs.

    Coordinates a concrete VMProvider backend with resource accounting
    (ResourceTracker) and requestor-name <-> backend-name mapping
    (VMNameMapper).
    """

    def __init__(
        self,
        provider: VMProvider,
        resource_tracker: ResourceTracker,
        name_mapper: VMNameMapper,
    ):
        self.provider = provider
        self.resource_tracker = resource_tracker
        self.name_mapper = name_mapper

    async def create_vm(self, config: VMConfig) -> VMInfo:
        """Create a new VM.

        Allocates resources first, generates a cloud-init file, and delegates
        to the provider. On failure the allocated resources are released;
        the cloud-init file is always cleaned up.

        Raises:
            ValueError: If the provider lacks the requested resources.
        """
        if not await self.resource_tracker.allocate(config.resources, config.name):
            raise ValueError("Insufficient resources available on provider")

        # NOTE(review): the multipass adapter generates its own instance name
        # and re-adds a mapping for config.name, overwriting this one — confirm
        # which component should own the mapping.
        multipass_name = f"golem-{config.name}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
        await self.name_mapper.add_mapping(config.name, multipass_name)

        cloud_init_path, config_id = generate_cloud_init(
            hostname=config.name,
            ssh_key=config.ssh_key
        )
        config.cloud_init_path = cloud_init_path

        try:
            vm_info = await self.provider.create_vm(config)
            return vm_info
        except Exception as e:
            logger.error("Failed to create VM, deallocating resources", exc_info=True)
            await self.resource_tracker.deallocate(config.resources, config.name)
            raise
        finally:
            # The cloud-init file is only needed during launch.
            cleanup_cloud_init(cloud_init_path, config_id)

    async def delete_vm(self, vm_id: str) -> None:
        """Delete a VM and release its tracked resources.

        Args:
            vm_id: The requestor-visible VM name.
        """
        multipass_name = await self.name_mapper.get_multipass_name(vm_id)
        if not multipass_name:
            logger.warning(f"No multipass name found for VM {vm_id}")
            return

        try:
            # Read the resources before deleting so we know how much to release.
            vm_info = await self.provider.get_vm_status(multipass_name)
            await self.provider.delete_vm(multipass_name)
            await self.resource_tracker.deallocate(vm_info.resources, vm_id)
        except VMNotFoundError:
            logger.warning(f"VM {multipass_name} not found on provider, cleaning up resources")
            # If the VM is not found, we still need to deallocate the resources we have tracked for it
            # Since we can't get the resources from the provider, we'll have to assume the resources are what we have tracked
            # This is not ideal, but it's the best we can do in this situation
            # A better solution would be to store the resources in the name mapper
            pass
        finally:
            await self.name_mapper.remove_mapping(vm_id)

    async def list_vms(self) -> List[VMInfo]:
        """List all VMs."""
        return await self.provider.list_vms()

    async def get_vm_status(self, vm_id: str) -> VMInfo:
        """Get the status of a VM.

        Raises:
            VMNotFoundError: If no mapping exists for vm_id.
        """
        multipass_name = await self.name_mapper.get_multipass_name(vm_id)
        if not multipass_name:
            # VMNotFoundError is already imported at module level; no need to
            # re-import it locally.
            raise VMNotFoundError(f"VM {vm_id} not found")
        return await self.provider.get_vm_status(multipass_name)

    async def get_all_vms_resources(self) -> Dict[str, VMResources]:
        """Get resources for all running VMs."""
        return await self.provider.get_all_vms_resources()

    async def initialize(self):
        """Initialize the underlying provider."""
        await self.provider.initialize()

    async def shutdown(self):
        """Shut down by letting the provider clean up its resources."""
        await self.provider.cleanup()
|
@@ -1,30 +0,0 @@
|
|
1
|
-
provider/__init__.py,sha256=HO1fkPpZqPO3z8O8-eVIyx8xXSMIVuTR_b1YF0RtXOg,45
|
2
|
-
provider/api/__init__.py,sha256=ssX1ugDqEPt8Fn04IymgmG-Ev8PiXLsCSaiZVvHQnec,344
|
3
|
-
provider/api/models.py,sha256=9799o6ZnBfG2LZCoqm2-6ONNNgqKB7bn5oIs_Ocq24s,3519
|
4
|
-
provider/api/routes.py,sha256=P27RQvNqFWn6PacRwr1PaVz-yv5KAWsp9KeORejkXSI,6452
|
5
|
-
provider/config.py,sha256=pFW0Qwej9eTZfx1BXTExhsXwul-drqn0Lu8wPhx399Y,16908
|
6
|
-
provider/discovery/__init__.py,sha256=VR3NRoQtZRH5Vs8FG7jnGLR7p7wn7XeZdLaBb3t8e1g,123
|
7
|
-
provider/discovery/advertiser.py,sha256=yv7RbRf1K43qOLAEa2Olj9hhN8etl2qsBuoHok0xoVs,6784
|
8
|
-
provider/discovery/golem_base_advertiser.py,sha256=UpSJyO6wyujBJ2xDrb6lhKC7zYv1vZg0db4t0k--5dk,5830
|
9
|
-
provider/discovery/golem_base_utils.py,sha256=xk7vznhMgzrn0AuGyk6-9N9ukp9oPdBbbk1RI-sVjp0,607
|
10
|
-
provider/discovery/resource_tracker.py,sha256=8dYhJxoe_jLRwisHoA0jr575YhUKmLIqSXfW88KshcQ,6000
|
11
|
-
provider/main.py,sha256=7Fj9h13uVL3dPeukAc2U3q_HonMsRhTSnj86lj6Hqtc,9898
|
12
|
-
provider/network/port_verifier.py,sha256=3l6WNwBHydggJRFYkAsuBp1eCxaU619kjWuM-zSVj2o,13267
|
13
|
-
provider/security/ethereum.py,sha256=EwPZj4JR8OEpto6LhKjuuT3Z9pBX6P7-UQaqJtqFkYQ,1242
|
14
|
-
provider/security/faucet.py,sha256=O2DgP3bIrRUm9tdLCdgnda9em0rPyeW42sWhO1EQJaA,5363
|
15
|
-
provider/utils/ascii_art.py,sha256=ykBFsztk57GIiz1NJ-EII5UvN74iECqQL4h9VmiW6Z8,3161
|
16
|
-
provider/utils/logging.py,sha256=C_elr0sJROHKQgErYpHJQvfujgh0k4Zf2gg8ZKfrmVk,2590
|
17
|
-
provider/utils/port_display.py,sha256=eLtkpYk5va1KWDQhEbxTYGymaXiPVpsYbvmdXM29IPo,12067
|
18
|
-
provider/utils/retry.py,sha256=ekP2ucaSJNN-lBcrIvyHa4QYPKNITMl1a5V1X6BBvsw,1560
|
19
|
-
provider/utils/setup.py,sha256=Z5dLuBQkb5vdoQsu1HJZwXmu9NWsiBYJ7Vq9-C-_tY8,2932
|
20
|
-
provider/vm/__init__.py,sha256=JGs50tUmzOR1rQ_w4fMY_3XWylmiA1G7KKWZkVw51mY,501
|
21
|
-
provider/vm/cloud_init.py,sha256=E5dDH7dqStRcJNDfbarBBe83-c9N63W8B5ycIrHI8eU,4627
|
22
|
-
provider/vm/models.py,sha256=RnfRQrziGBQZV_z6whQ8m328Abn2pItKZEZhT4S8AkU,6266
|
23
|
-
provider/vm/multipass.py,sha256=DOL_9fRXNu9LyWdW_HGCxG9C-iuuPnAF0u95sObb6Fk,16940
|
24
|
-
provider/vm/name_mapper.py,sha256=MrshNeJ4Dw-WBsyiIVcn9N5xyOxaBKX4Yqhyh_m5IFg,4103
|
25
|
-
provider/vm/port_manager.py,sha256=H7uLe-jJgBPtvnWrIHhRP-04sq11wMunOf0CFa_eDWE,11820
|
26
|
-
provider/vm/proxy_manager.py,sha256=aSzMVggH8BgE6L92yCSRtultEQiwHtPP8fnQzSF102Y,14941
|
27
|
-
golem_vm_provider-0.1.26.dist-info/METADATA,sha256=p4DYiuNE7QKUyIyev2sZVXiTMP3gshKOokTWGMepJ4M,10645
|
28
|
-
golem_vm_provider-0.1.26.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
|
29
|
-
golem_vm_provider-0.1.26.dist-info/entry_points.txt,sha256=5Jiie1dIXygmxmDW66bKKxQpmBLJ7leSKRrb8bkQALw,52
|
30
|
-
golem_vm_provider-0.1.26.dist-info/RECORD,,
|
File without changes
|