golem-vm-provider 0.1.19__tar.gz → 0.1.21__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/PKG-INFO +1 -1
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/main.py +25 -9
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/network/port_verifier.py +9 -3
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/vm/multipass.py +4 -1
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/vm/port_manager.py +15 -4
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/vm/proxy_manager.py +124 -38
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/pyproject.toml +1 -1
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/README.md +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/__init__.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/api/__init__.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/api/models.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/api/routes.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/config.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/discovery/__init__.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/discovery/advertiser.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/discovery/resource_tracker.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/security/ethereum.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/utils/ascii_art.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/utils/logging.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/utils/port_display.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/utils/retry.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/utils/setup.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/vm/__init__.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/vm/cloud_init.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/vm/models.py +0 -0
- {golem_vm_provider-0.1.19 → golem_vm_provider-0.1.21}/provider/vm/name_mapper.py +0 -0
**provider/main.py**

```diff
@@ -18,28 +18,44 @@ app = FastAPI(title="VM on Golem Provider")
 async def setup_provider() -> None:
     """Setup and initialize the provider components."""
     try:
-        #
-        port_manager = app.state.port_manager
-
-        # Create resource tracker
+        # Create resource tracker first
         logger.process("🔄 Initializing resource tracker...")
         resource_tracker = ResourceTracker()
         app.state.resource_tracker = resource_tracker
-
-        # Create provider with resource tracker and port manager
+
+        # Create provider with resource tracker and temporary port manager
         logger.process("🔄 Initializing VM provider...")
-        provider = MultipassProvider(resource_tracker, port_manager=
+        provider = MultipassProvider(resource_tracker, port_manager=None)  # Will be set later
+
         try:
+            # Initialize provider (without port operations)
             await asyncio.wait_for(provider.initialize(), timeout=30)
 
-            # Store provider
+            # Store provider reference
             app.state.provider = provider
             app.state.proxy_manager = provider.proxy_manager
 
-            # Restore proxy configurations
+            # Restore proxy configurations first
             logger.process("🔄 Restoring proxy configurations...")
             await app.state.proxy_manager._load_state()
 
+            # Now initialize port manager with knowledge of restored proxies
+            logger.process("🔄 Initializing port manager...")
+            port_manager = PortManager(
+                start_port=settings.PORT_RANGE_START,
+                end_port=settings.PORT_RANGE_END,
+                discovery_port=settings.PORT,
+                existing_ports=app.state.proxy_manager.get_active_ports()
+            )
+
+            if not await port_manager.initialize():
+                raise RuntimeError("Port verification failed")
+
+            # Update provider and proxy manager with verified port manager
+            app.state.port_manager = port_manager
+            provider.port_manager = port_manager
+            app.state.proxy_manager.port_manager = port_manager
+
         except asyncio.TimeoutError:
             logger.error("Provider initialization timed out")
             raise
```
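The hunk above inverts the startup order: proxy state is restored before any `PortManager` exists, and the ports held by restored proxies are handed in via `existing_ports` so they are never re-verified or allocated again. The sketch below illustrates that ordering with simplified stand-ins; `StubProxyManager` and `StubPortManager` are illustrative placeholders, not the package's classes.

```python
import asyncio

class StubProxyManager:
    """Stand-in for PythonProxyManager: pretends two proxies were restored."""
    def __init__(self):
        self._active_ports = {"vm-a": 50800, "vm-b": 50801}
        self.port_manager = None  # injected later, as in the diff

    async def _load_state(self):
        pass  # the real code restores proxies from proxy_state.json

    def get_active_ports(self):
        return set(self._active_ports.values())

class StubPortManager:
    """Stand-in for PortManager: skips ports already in use."""
    def __init__(self, start_port, end_port, existing_ports):
        self.candidates = set(range(start_port, end_port)) - existing_ports

    async def initialize(self):
        return bool(self.candidates)  # the real code verifies reachability

async def setup():
    proxy_manager = StubProxyManager()
    await proxy_manager._load_state()                   # 1. restore proxies first
    port_manager = StubPortManager(                     # 2. build port manager with
        50800, 50810,                                   #    knowledge of restored ports
        existing_ports=proxy_manager.get_active_ports(),
    )
    if not await port_manager.initialize():             # 3. verify remaining ports
        raise RuntimeError("Port verification failed")
    proxy_manager.port_manager = port_manager           # 4. inject the verified manager
    print(sorted(port_manager.candidates))              # 50802..50809 remain free

asyncio.run(setup())
```

The design choice is plain dependency injection: the provider and proxy manager are created with `port_manager=None` and only receive the real instance once port verification has succeeded.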
**provider/network/port_verifier.py**

```diff
@@ -198,15 +198,21 @@ class PortVerifier:
             ))
             logger.warning(error_msg)
 
-        # If no successful
-        if not any(
+        # If no servers were successful, fail verification
+        if not any(attempt.success for attempt in attempts):
             error_msg = (
-                "Failed to
+                "Failed to connect to any port check servers. Please ensure:\n"
                 "1. At least one port check server is running and accessible\n"
                 "2. Your network connection is stable\n"
                 "3. The server URLs are correct"
             )
             logger.error(error_msg)
+            raise RuntimeError(error_msg)
+
+        # If no successful verifications but servers were reachable, mark ports as inaccessible
+        if not any(result.accessible for result in results.values()):
+            error_msg = "No ports were verified as accessible"
+            logger.error(error_msg)
             results = {
                 port: PortVerificationResult(
                     port=port,
```
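The verifier now distinguishes two failure modes: no port-check server answered at all (a hard error that raises), versus servers answered but no port passed verification (ports are marked inaccessible and reported). A minimal sketch of that two-tier decision, with hypothetical `Attempt`/`Result` stand-ins for the package's own result types:

```python
from dataclasses import dataclass

@dataclass
class Attempt:
    server: str
    success: bool

@dataclass
class Result:
    port: int
    accessible: bool

def summarize(attempts, results):
    """Mirror of the hunk's two-tier check (simplified, not the package's API)."""
    # Tier 1: no check server answered at all -> hard failure.
    if not any(a.success for a in attempts):
        raise RuntimeError("Failed to connect to any port check servers")
    # Tier 2: servers answered but no port passed -> report, don't raise here.
    if not any(r.accessible for r in results.values()):
        return "No ports were verified as accessible"
    return "ok"

print(summarize([Attempt("http://localhost:9000", True)],
                {50800: Result(50800, False)}))
```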
**provider/vm/multipass.py**

```diff
@@ -37,8 +37,11 @@ class MultipassProvider(VMProvider):
         self.vm_data_dir.mkdir(parents=True, exist_ok=True)
 
         # Initialize managers
-        self.proxy_manager = PythonProxyManager(port_manager=port_manager)
         self.name_mapper = VMNameMapper(self.vm_data_dir / "vm_names.json")
+        self.proxy_manager = PythonProxyManager(
+            port_manager=port_manager,
+            name_mapper=self.name_mapper
+        )
 
     def _verify_installation(self) -> None:
         """Verify multipass is installed and get version."""
```
**provider/vm/port_manager.py**

```diff
@@ -23,7 +23,8 @@ class PortManager:
         end_port: int = 50900,
         state_file: Optional[str] = None,
         port_check_servers: Optional[List[str]] = None,
-        discovery_port: Optional[int] = None
+        discovery_port: Optional[int] = None,
+        existing_ports: Optional[Set[int]] = None
     ):
         """Initialize the port manager.
 
@@ -32,6 +33,8 @@ class PortManager:
             end_port: End of port range (exclusive)
             state_file: Path to persist port assignments
             port_check_servers: List of URLs for port checking services
+            discovery_port: Port used for discovery service
+            existing_ports: Set of ports that should be considered in use
         """
         self.start_port = start_port
         self.end_port = end_port
@@ -40,12 +43,12 @@ class PortManager:
         self.lock = Lock()
         self._used_ports: dict[str, int] = {}  # vm_id -> port
         self.verified_ports: Set[int] = set()
+        self._existing_ports = existing_ports or set()
 
         # Initialize port verifier with default servers
         self.port_check_servers = port_check_servers or [
             "http://localhost:9000",  # Local development server
             "http://195.201.39.101:9000",  # Production servers
-
         ]
         self.discovery_port = discovery_port or settings.PORT
         self.port_verifier = PortVerifier(
@@ -53,7 +56,14 @@ class PortManager:
             discovery_port=self.discovery_port
         )
 
+        # Load state after setting existing ports
         self._load_state()
+
+        # Mark existing ports as used and remove from verified ports
+        for port in self._existing_ports:
+            if port in self.verified_ports:
+                self.verified_ports.remove(port)
+            logger.debug(f"Marked port {port} as in use from existing ports")
 
     async def initialize(self) -> bool:
         """Initialize port manager with verification.
@@ -70,8 +80,9 @@ class PortManager:
         )
         display.print_header()
 
-        # Only verify
-
+        # Only verify ports that aren't already marked as in use
+        available_ports = set(range(self.start_port, self.end_port)) - self._existing_ports
+        ssh_ports = list(available_ports)
         logger.info(f"Starting port verification...")
         logger.info(f"SSH ports range: {self.start_port}-{self.end_port}")
         logger.info(
```
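Taken together, these hunks make `PortManager` aware of ports that are already in use before verification starts: `existing_ports` is removed from `verified_ports` at construction time and subtracted from the SSH range when `initialize()` builds its verification list. A small worked example of that set arithmetic (the port numbers are invented for illustration):

```python
# Assumed values for illustration only; the real range comes from settings.
PORT_RANGE_START, PORT_RANGE_END = 50800, 50805

existing_ports = {50801, 50803}          # ports held by restored proxies
verified_ports = {50800, 50801, 50802}   # ports verified on a previous run (persisted state)

# Mirror of the constructor logic: existing ports drop out of verified_ports...
verified_ports -= existing_ports
# ...and out of the set that will be re-verified on initialize().
ssh_ports = sorted(set(range(PORT_RANGE_START, PORT_RANGE_END)) - existing_ports)

print(verified_ports)  # {50800, 50802}
print(ssh_ports)       # [50800, 50802, 50804]
```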
**provider/vm/proxy_manager.py**

```diff
@@ -3,7 +3,7 @@ import json
 import asyncio
 import logging
 from pathlib import Path
-from typing import Optional, Dict
+from typing import Optional, Dict, Set
 from asyncio import Task, Transport, Protocol
 
 from .port_manager import PortManager
@@ -146,59 +146,145 @@ class PythonProxyManager:
 
     def __init__(
         self,
-        port_manager: PortManager,
+        port_manager: Optional[PortManager],
+        name_mapper: "VMNameMapper",
         state_file: Optional[str] = None
     ):
         """Initialize the proxy manager.
 
         Args:
-            port_manager: Port allocation manager
+            port_manager: Port allocation manager (optional during startup)
+            name_mapper: VM name mapping manager
             state_file: Path to persist proxy state
         """
         self.port_manager = port_manager
+        self.name_mapper = name_mapper
         self.state_file = state_file or os.path.expanduser("~/.golem/provider/proxy_state.json")
-        self._proxies: Dict[str, ProxyServer] = {}  #
-
+        self._proxies: Dict[str, ProxyServer] = {}  # multipass_name -> ProxyServer
+        self._state_version = 1  # For future state schema migrations
+        self._active_ports: Dict[str, int] = {}  # multipass_name -> port
 
+    def get_active_ports(self) -> Set[int]:
+        """Get set of ports that should be considered in use.
+
+        Returns:
+            Set of ports that are allocated to VMs
+        """
+        return set(self._active_ports.values())
+
     async def _load_state(self) -> None:
         """Load and restore proxy state from file."""
         try:
             state_path = Path(self.state_file)
-            if state_path.exists():
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if not state_path.exists():
+                return
+
+            with open(state_path, 'r') as f:
+                state = json.load(f)
+
+            # Check state version for future migrations
+            if state.get('version', 1) != self._state_version:
+                logger.warning(f"State version mismatch: {state.get('version')} != {self._state_version}")
+
+            # First load all port allocations
+            for requestor_name, proxy_info in state.get('proxies', {}).items():
+                multipass_name = await self.name_mapper.get_multipass_name(requestor_name)
+                if multipass_name:
+                    self._active_ports[multipass_name] = proxy_info['port']
+
+            # Then attempt to restore proxies with retries
+            restore_tasks = []
+            for requestor_name, proxy_info in state.get('proxies', {}).items():
+                multipass_name = await self.name_mapper.get_multipass_name(requestor_name)
+                if multipass_name:
+                    task = self._restore_proxy_with_retry(
+                        multipass_name=multipass_name,
+                        vm_ip=proxy_info['target'],
+                        port=proxy_info['port']
+                    )
+                    restore_tasks.append(task)
+                else:
+                    logger.warning(f"No multipass name found for requestor VM {requestor_name}")
+
+            # Wait for all restore attempts
+            if restore_tasks:
+                results = await asyncio.gather(*restore_tasks, return_exceptions=True)
+                successful = sum(1 for r in results if r is True)
+                logger.info(f"Restored {successful}/{len(state.get('proxies', {}))} proxy configurations")
+
         except Exception as e:
             logger.error(f"Failed to load proxy state: {e}")
+
+    async def _restore_proxy_with_retry(
+        self,
+        multipass_name: str,
+        vm_ip: str,
+        port: int,
+        max_retries: int = 3,
+        initial_delay: float = 1.0
+    ) -> bool:
+        """Attempt to restore a proxy with exponential backoff retry.
+
+        Args:
+            multipass_name: Multipass VM name
+            vm_ip: VM IP address
+            port: Port to use
+            max_retries: Maximum number of retry attempts
+            initial_delay: Initial delay between retries (doubles each attempt)
+
+        Returns:
+            bool: True if restoration was successful
+        """
+        delay = initial_delay
+        for attempt in range(max_retries):
+            try:
+                if attempt > 0:
+                    logger.info(f"Retry attempt {attempt + 1} for {multipass_name} on port {port}")
+                    await asyncio.sleep(delay)
+                    delay *= 2  # Exponential backoff
+
+                # Attempt to create proxy
+                proxy = ProxyServer(port, vm_ip)
+                await proxy.start()
+
+                self._proxies[multipass_name] = proxy
+                logger.info(f"Successfully restored proxy for {multipass_name} on port {port}")
+                return True
+
+            except Exception as e:
+                logger.warning(f"Attempt {attempt + 1} failed for {multipass_name}: {e}")
+                if attempt == max_retries - 1:
+                    logger.error(f"Failed to restore proxy for {multipass_name} after {max_retries} attempts")
+                    # Remove from active ports if all retries failed
+                    self._active_ports.pop(multipass_name, None)
+        return False
 
-    def _save_state(self) -> None:
-        """Save current proxy state to file."""
+    async def _save_state(self) -> None:
+        """Save current proxy state to file using requestor names."""
         try:
             state = {
-
-
-                    'target': proxy.target_host
-                }
-                for vm_id, proxy in self._proxies.items()
+                'version': self._state_version,
+                'proxies': {}
             }
+
+            for multipass_name, proxy in self._proxies.items():
+                requestor_name = await self.name_mapper.get_requestor_name(multipass_name)
+                if requestor_name:
+                    state['proxies'][requestor_name] = {
+                        'port': proxy.listen_port,
+                        'target': proxy.target_host
+                    }
+
+            # Save to temporary file first
+            temp_file = f"{self.state_file}.tmp"
             os.makedirs(os.path.dirname(self.state_file), exist_ok=True)
-
-
+
+            with open(temp_file, 'w') as f:
+                json.dump(state, f, indent=2)
+
+            # Atomic rename
+            os.replace(temp_file, self.state_file)
+
         except Exception as e:
             logger.error(f"Failed to save proxy state: {e}")
 
@@ -206,7 +292,7 @@ class PythonProxyManager:
         """Add proxy configuration for a new VM.
 
         Args:
-            vm_id: Unique identifier for the VM
+            vm_id: Unique identifier for the VM (multipass name)
            vm_ip: IP address of the VM
             port: Optional specific port to use, if not provided one will be allocated
 
@@ -226,7 +312,7 @@ class PythonProxyManager:
             await proxy.start()
 
             self._proxies[vm_id] = proxy
-            self._save_state()
+            await self._save_state()
 
             logger.info(f"Started proxy for VM {vm_id} on port {port}")
             return True
@@ -242,14 +328,14 @@ class PythonProxyManager:
         """Remove proxy configuration for a VM.
 
         Args:
-            vm_id: Unique identifier for the VM
+            vm_id: Unique identifier for the VM (multipass name)
         """
         try:
             if vm_id in self._proxies:
                 proxy = self._proxies.pop(vm_id)
                 await proxy.stop()
                 self.port_manager.deallocate_port(vm_id)
-                self._save_state()
+                await self._save_state()
                 logger.info(f"Removed proxy for VM {vm_id}")
         except Exception as e:
             logger.error(f"Failed to remove proxy for VM {vm_id}: {e}")
@@ -270,7 +356,7 @@ class PythonProxyManager:
                 cleanup_errors.append(f"Failed to remove proxy for VM {vm_id}: {e}")
 
         try:
-            self._save_state()
+            await self._save_state()
         except Exception as e:
             cleanup_errors.append(f"Failed to save state: {e}")
 
```
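Two smaller patterns in this file are worth calling out: `_save_state` became a coroutine because it now awaits `name_mapper.get_requestor_name()`, and it writes to a temporary file followed by `os.replace` so a crash mid-write cannot corrupt `proxy_state.json`. A standalone sketch of that write-then-rename pattern, using a hypothetical path for demonstration:

```python
import json
import os

def save_json_atomically(path: str, payload: dict) -> None:
    """Write-then-rename, the same pattern the new _save_state uses.

    A crash mid-write leaves the old file intact, because os.replace
    atomically swaps the temporary file into place on the same filesystem.
    """
    os.makedirs(os.path.dirname(path), exist_ok=True)
    tmp = f"{path}.tmp"
    with open(tmp, "w") as f:
        json.dump(payload, f, indent=2)
    os.replace(tmp, path)  # atomic swap into place

save_json_atomically("/tmp/golem-demo/proxy_state.json",
                     {"version": 1, "proxies": {}})
```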
**pyproject.toml**

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "golem-vm-provider"
-version = "0.1.19"
+version = "0.1.21"
 description = "VM on Golem Provider Node - Run your own provider node to offer VMs on the Golem Network"
 authors = ["Phillip Jensen <phillip+vm-on-golem@golemgrid.com>"]
 readme = "README.md"
```