golem-vm-provider 0.1.19__py3-none-any.whl → 0.1.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
golem_vm_provider-0.1.21.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: golem-vm-provider
-Version: 0.1.19
+Version: 0.1.21
 Summary: VM on Golem Provider Node - Run your own provider node to offer VMs on the Golem Network
 Keywords: golem,vm,provider,cloud,decentralized
 Author: Phillip Jensen
golem_vm_provider-0.1.21.dist-info/RECORD CHANGED
@@ -6,8 +6,8 @@ provider/config.py,sha256=-Cu05ebOjUbhnh5iv3raQ7Z79HMhZ9EcRIRrZVW3Ino,14513
 provider/discovery/__init__.py,sha256=VR3NRoQtZRH5Vs8FG7jnGLR7p7wn7XeZdLaBb3t8e1g,123
 provider/discovery/advertiser.py,sha256=yv7RbRf1K43qOLAEa2Olj9hhN8etl2qsBuoHok0xoVs,6784
 provider/discovery/resource_tracker.py,sha256=8dYhJxoe_jLRwisHoA0jr575YhUKmLIqSXfW88KshcQ,6000
-provider/main.py,sha256=jl80WRIvsvfbHFvoPZqtIhFDUJm0andcyXqUu6Dfi2E,9389
-provider/network/port_verifier.py,sha256=AUtBGuZdfq9Jt4BRDuYesh5YEmwneEzYUgIw-uajZhA,12977
+provider/main.py,sha256=WZEtW_u4djTb7-_gDLFsRDeyDqB5SqB-rjXua_L_P3g,10146
+provider/network/port_verifier.py,sha256=3l6WNwBHydggJRFYkAsuBp1eCxaU619kjWuM-zSVj2o,13267
 provider/security/ethereum.py,sha256=SDRDbcjynbVy44kNnxlDcYLL0BZ3Qnc0DvmneQ-WKLE,1383
 provider/utils/ascii_art.py,sha256=ykBFsztk57GIiz1NJ-EII5UvN74iECqQL4h9VmiW6Z8,3161
 provider/utils/logging.py,sha256=C_elr0sJROHKQgErYpHJQvfujgh0k4Zf2gg8ZKfrmVk,2590
@@ -17,11 +17,11 @@ provider/utils/setup.py,sha256=Z5dLuBQkb5vdoQsu1HJZwXmu9NWsiBYJ7Vq9-C-_tY8,2932
 provider/vm/__init__.py,sha256=JGs50tUmzOR1rQ_w4fMY_3XWylmiA1G7KKWZkVw51mY,501
 provider/vm/cloud_init.py,sha256=E5dDH7dqStRcJNDfbarBBe83-c9N63W8B5ycIrHI8eU,4627
 provider/vm/models.py,sha256=zkfvP5Z50SPDNajwZTt9NTDIMRQIsZLvSOsuirHEcJM,6256
-provider/vm/multipass.py,sha256=FOcsfcJ-NrgBg_fvq_CKOKsQ0xOmk7Z34KXi3ag_Vl8,16603
+provider/vm/multipass.py,sha256=RufJbl39d_mEXruX0gX1zCmEEiQ_DytPXgW6F1qVQaM,16667
 provider/vm/name_mapper.py,sha256=MrshNeJ4Dw-WBsyiIVcn9N5xyOxaBKX4Yqhyh_m5IFg,4103
-provider/vm/port_manager.py,sha256=d03uwU76vx6LgADMN8ffBT9t400XQ3vtYlXr6cLIFN0,9831
-provider/vm/proxy_manager.py,sha256=k12bjq1WkizkpUJIyFEoHgT21vhy0l2pSggp3m-8bFc,10895
-golem_vm_provider-0.1.19.dist-info/METADATA,sha256=zt40xePOqxAmby_p-Keplksm4vnBXJ8k3H5G4H6FEqs,10594
-golem_vm_provider-0.1.19.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-golem_vm_provider-0.1.19.dist-info/entry_points.txt,sha256=E4rCWo_Do_2zCG_GewNuftfVlHF_8b_OvioZre0dfeA,54
-golem_vm_provider-0.1.19.dist-info/RECORD,,
+provider/vm/port_manager.py,sha256=KhxJxYs2PN-LDcOfwTa7fCUuZr9PFTKz6kxxVpKHJdU,10477
+provider/vm/proxy_manager.py,sha256=dlgrlJEjr_Fyrm9JyyOHMWPf547HYUU-fRSh32XSvjk,14419
+golem_vm_provider-0.1.21.dist-info/METADATA,sha256=eE8pDXbsbX8uPzOhs4qkYyggLz49fDitWIwdWqcCtRw,10594
+golem_vm_provider-0.1.21.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+golem_vm_provider-0.1.21.dist-info/entry_points.txt,sha256=E4rCWo_Do_2zCG_GewNuftfVlHF_8b_OvioZre0dfeA,54
+golem_vm_provider-0.1.21.dist-info/RECORD,,
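For readers scanning these entries: each RECORD line has the form `path,sha256=<digest>,<size>`, where the digest is an unpadded URL-safe base64 SHA-256 of the file's bytes (the standard wheel RECORD convention), so a changed hash together with a changed size is a quick signal of which modules actually differ between the two releases. A minimal sketch of how such an entry is derived; the example path is from the listing above, run against a hypothetical local checkout:

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a RECORD line for one file: path,sha256=<digest>,<size>."""
    data = Path(path).read_bytes()
    # Unpadded URL-safe base64 of the SHA-256 digest, per the wheel spec.
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# record_entry("provider/main.py")
# -> "provider/main.py,sha256=WZEtW_u4djTb7-_gDLFsRDeyDqB5SqB-rjXua_L_P3g,10146"
```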
provider/main.py CHANGED
@@ -18,28 +18,44 @@ app = FastAPI(title="VM on Golem Provider")
 async def setup_provider() -> None:
     """Setup and initialize the provider components."""
     try:
-        # Port manager is already initialized and verified in startup_event
-        port_manager = app.state.port_manager
-
-        # Create resource tracker
+        # Create resource tracker first
         logger.process("🔄 Initializing resource tracker...")
         resource_tracker = ResourceTracker()
         app.state.resource_tracker = resource_tracker
-
-        # Create provider with resource tracker and port manager
+
+        # Create provider with resource tracker and temporary port manager
         logger.process("🔄 Initializing VM provider...")
-        provider = MultipassProvider(resource_tracker, port_manager=port_manager)
+        provider = MultipassProvider(resource_tracker, port_manager=None)  # Will be set later
+
         try:
+            # Initialize provider (without port operations)
             await asyncio.wait_for(provider.initialize(), timeout=30)
 
-            # Store provider and proxy manager references
+            # Store provider reference
             app.state.provider = provider
             app.state.proxy_manager = provider.proxy_manager
 
-            # Restore proxy configurations
+            # Restore proxy configurations first
             logger.process("🔄 Restoring proxy configurations...")
             await app.state.proxy_manager._load_state()
 
+            # Now initialize port manager with knowledge of restored proxies
+            logger.process("🔄 Initializing port manager...")
+            port_manager = PortManager(
+                start_port=settings.PORT_RANGE_START,
+                end_port=settings.PORT_RANGE_END,
+                discovery_port=settings.PORT,
+                existing_ports=app.state.proxy_manager.get_active_ports()
+            )
+
+            if not await port_manager.initialize():
+                raise RuntimeError("Port verification failed")
+
+            # Update provider and proxy manager with verified port manager
+            app.state.port_manager = port_manager
+            provider.port_manager = port_manager
+            app.state.proxy_manager.port_manager = port_manager
+
         except asyncio.TimeoutError:
             logger.error("Provider initialization timed out")
             raise
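The net effect of this hunk is a reordered startup: proxy state is now restored before the port manager exists, so the port manager can be seeded with the ports those proxies already hold instead of re-verifying or reallocating them. A condensed sketch of that ordering, using hypothetical stand-in classes rather than the package's real ones:

```python
import asyncio

class StubProxyManager:
    """Stand-in for PythonProxyManager; pretends one proxy was restored."""

    def __init__(self) -> None:
        self.port_manager = None
        self._active_ports = {"vm-a": 50800}

    async def load_state(self) -> None:
        pass  # the real code re-reads proxy_state.json here

    def get_active_ports(self) -> set:
        return set(self._active_ports.values())

class StubPortManager:
    """Stand-in for PortManager; records ports it must not re-verify."""

    def __init__(self, existing_ports: set) -> None:
        self.existing_ports = existing_ports

    async def initialize(self) -> bool:
        return True  # the real code verifies the remaining ports here

async def bootstrap() -> None:
    proxies = StubProxyManager()
    await proxies.load_state()                           # 1. restore proxies
    ports = StubPortManager(proxies.get_active_ports())  # 2. seed port manager
    if not await ports.initialize():                     # 3. verify what's left
        raise RuntimeError("Port verification failed")
    proxies.port_manager = ports                         # 4. wire back-reference
    print(f"skipped re-verifying: {ports.existing_ports}")

asyncio.run(bootstrap())
```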
provider/network/port_verifier.py CHANGED
@@ -198,15 +198,21 @@ class PortVerifier:
             ))
             logger.warning(error_msg)
 
-        # If no successful verifications, mark all ports as inaccessible
-        if not any(result.accessible for result in results.values()):
+        # If no servers were successful, fail verification
+        if not any(attempt.success for attempt in attempts):
             error_msg = (
-                "Failed to verify ports with any server. Please ensure:\n"
+                "Failed to connect to any port check servers. Please ensure:\n"
                 "1. At least one port check server is running and accessible\n"
                 "2. Your network connection is stable\n"
                 "3. The server URLs are correct"
             )
             logger.error(error_msg)
+            raise RuntimeError(error_msg)
+
+        # If no successful verifications but servers were reachable, mark ports as inaccessible
+        if not any(result.accessible for result in results.values()):
+            error_msg = "No ports were verified as accessible"
+            logger.error(error_msg)
             results = {
                 port: PortVerificationResult(
                     port=port,
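The change splits one failure case into two: if no check server could be contacted at all, verification now raises instead of silently marking every port inaccessible; only when at least one server answered are the unverified ports downgraded. A standalone sketch of the distinction, with simplified stand-ins for the module's result types:

```python
from dataclasses import dataclass

@dataclass
class ServerAttempt:
    server: str
    success: bool  # could we reach the check server at all?

@dataclass
class PortCheck:
    port: int
    accessible: bool  # did the server confirm the port is reachable?

def classify(attempts: list, checks: list) -> str:
    if not any(a.success for a in attempts):
        # Hard failure: no server answered, so we learned nothing about ports.
        raise RuntimeError("Failed to connect to any port check servers")
    if not any(c.accessible for c in checks):
        # Soft failure: servers answered, but every port came back blocked.
        return "no ports accessible"
    return "ok"

print(classify([ServerAttempt("http://localhost:9000", True)],
               [PortCheck(50800, False)]))  # -> no ports accessible
```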
provider/vm/multipass.py CHANGED
@@ -37,8 +37,11 @@ class MultipassProvider(VMProvider):
         self.vm_data_dir.mkdir(parents=True, exist_ok=True)
 
         # Initialize managers
-        self.proxy_manager = PythonProxyManager(port_manager=port_manager)
         self.name_mapper = VMNameMapper(self.vm_data_dir / "vm_names.json")
+        self.proxy_manager = PythonProxyManager(
+            port_manager=port_manager,
+            name_mapper=self.name_mapper
+        )
 
     def _verify_installation(self) -> None:
         """Verify multipass is installed and get version."""
provider/vm/port_manager.py CHANGED
@@ -23,7 +23,8 @@ class PortManager:
         end_port: int = 50900,
         state_file: Optional[str] = None,
         port_check_servers: Optional[List[str]] = None,
-        discovery_port: Optional[int] = None
+        discovery_port: Optional[int] = None,
+        existing_ports: Optional[Set[int]] = None
     ):
         """Initialize the port manager.
 
@@ -32,6 +33,8 @@ class PortManager:
             end_port: End of port range (exclusive)
             state_file: Path to persist port assignments
             port_check_servers: List of URLs for port checking services
+            discovery_port: Port used for discovery service
+            existing_ports: Set of ports that should be considered in use
         """
         self.start_port = start_port
         self.end_port = end_port
@@ -40,12 +43,12 @@ class PortManager:
         self.lock = Lock()
         self._used_ports: dict[str, int] = {}  # vm_id -> port
         self.verified_ports: Set[int] = set()
+        self._existing_ports = existing_ports or set()
 
         # Initialize port verifier with default servers
         self.port_check_servers = port_check_servers or [
             "http://localhost:9000",  # Local development server
             "http://195.201.39.101:9000",  # Production servers
-
         ]
         self.discovery_port = discovery_port or settings.PORT
         self.port_verifier = PortVerifier(
@@ -53,7 +56,14 @@ class PortManager:
             discovery_port=self.discovery_port
         )
 
+        # Load state after setting existing ports
         self._load_state()
+
+        # Mark existing ports as used and remove from verified ports
+        for port in self._existing_ports:
+            if port in self.verified_ports:
+                self.verified_ports.remove(port)
+                logger.debug(f"Marked port {port} as in use from existing ports")
 
     async def initialize(self) -> bool:
         """Initialize port manager with verification.
@@ -70,8 +80,9 @@ class PortManager:
         )
         display.print_header()
 
-        # Only verify SSH ports since provider port was already verified
-        ssh_ports = list(range(self.start_port, self.end_port))
+        # Only verify ports that aren't already marked as in use
+        available_ports = set(range(self.start_port, self.end_port)) - self._existing_ports
+        ssh_ports = list(available_ports)
         logger.info(f"Starting port verification...")
         logger.info(f"SSH ports range: {self.start_port}-{self.end_port}")
         logger.info(
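The new existing_ports parameter lets the port manager treat ports held by restored proxies as already allocated, so they are excluded from verification (and, per the constructor change above, dropped from verified_ports). A trimmed, standalone sketch of that bookkeeping; MiniPortManager is a stand-in, not the package's PortManager:

```python
from typing import Optional, Set

class MiniPortManager:
    """Stand-in showing the existing_ports bookkeeping only."""

    def __init__(self, start_port: int, end_port: int,
                 existing_ports: Optional[Set[int]] = None) -> None:
        self.start_port = start_port
        self.end_port = end_port
        self._existing_ports = existing_ports or set()
        self.verified_ports: Set[int] = set()

    def ports_to_verify(self) -> list:
        # Seeded ports are skipped during verification.
        return sorted(set(range(self.start_port, self.end_port)) - self._existing_ports)

pm = MiniPortManager(50800, 50805, existing_ports={50800, 50801})
print(pm.ports_to_verify())  # [50802, 50803, 50804]
```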
provider/vm/proxy_manager.py CHANGED
@@ -3,7 +3,7 @@ import json
 import asyncio
 import logging
 from pathlib import Path
-from typing import Optional, Dict
+from typing import Optional, Dict, Set
 from asyncio import Task, Transport, Protocol
 
 from .port_manager import PortManager
@@ -146,59 +146,145 @@ class PythonProxyManager:
 
     def __init__(
         self,
-        port_manager: PortManager,
+        port_manager: Optional[PortManager],
+        name_mapper: "VMNameMapper",
         state_file: Optional[str] = None
     ):
         """Initialize the proxy manager.
 
         Args:
-            port_manager: Port allocation manager
+            port_manager: Port allocation manager (optional during startup)
+            name_mapper: VM name mapping manager
             state_file: Path to persist proxy state
         """
         self.port_manager = port_manager
+        self.name_mapper = name_mapper
         self.state_file = state_file or os.path.expanduser("~/.golem/provider/proxy_state.json")
-        self._proxies: Dict[str, ProxyServer] = {}  # vm_id -> ProxyServer
-        # Note: _load_state is now async and will be called explicitly during provider setup
+        self._proxies: Dict[str, ProxyServer] = {}  # multipass_name -> ProxyServer
+        self._state_version = 1  # For future state schema migrations
+        self._active_ports: Dict[str, int] = {}  # multipass_name -> port
 
+    def get_active_ports(self) -> Set[int]:
+        """Get set of ports that should be considered in use.
+
+        Returns:
+            Set of ports that are allocated to VMs
+        """
+        return set(self._active_ports.values())
+
     async def _load_state(self) -> None:
         """Load and restore proxy state from file."""
         try:
             state_path = Path(self.state_file)
-            if state_path.exists():
-                with open(state_path, 'r') as f:
-                    state = json.load(f)
-                # Restore proxy servers from saved state
-                restore_tasks = []
-                for vm_id, proxy_info in state.items():
-                    # Create task to restore proxy
-                    task = self.add_vm(
-                        vm_id=vm_id,
-                        vm_ip=proxy_info['target'],
-                        port=proxy_info['port']
-                    )
-                    restore_tasks.append(task)
-
-                # Wait for all proxies to be restored
-                if restore_tasks:
-                    results = await asyncio.gather(*restore_tasks, return_exceptions=True)
-                    successful = sum(1 for r in results if r is True)
-                    logger.info(f"Restored {successful}/{len(state)} proxy configurations")
+            if not state_path.exists():
+                return
+
+            with open(state_path, 'r') as f:
+                state = json.load(f)
+
+            # Check state version for future migrations
+            if state.get('version', 1) != self._state_version:
+                logger.warning(f"State version mismatch: {state.get('version')} != {self._state_version}")
+
+            # First load all port allocations
+            for requestor_name, proxy_info in state.get('proxies', {}).items():
+                multipass_name = await self.name_mapper.get_multipass_name(requestor_name)
+                if multipass_name:
+                    self._active_ports[multipass_name] = proxy_info['port']
+
+            # Then attempt to restore proxies with retries
+            restore_tasks = []
+            for requestor_name, proxy_info in state.get('proxies', {}).items():
+                multipass_name = await self.name_mapper.get_multipass_name(requestor_name)
+                if multipass_name:
+                    task = self._restore_proxy_with_retry(
+                        multipass_name=multipass_name,
+                        vm_ip=proxy_info['target'],
+                        port=proxy_info['port']
+                    )
+                    restore_tasks.append(task)
+                else:
+                    logger.warning(f"No multipass name found for requestor VM {requestor_name}")
+
+            # Wait for all restore attempts
+            if restore_tasks:
+                results = await asyncio.gather(*restore_tasks, return_exceptions=True)
+                successful = sum(1 for r in results if r is True)
+                logger.info(f"Restored {successful}/{len(state.get('proxies', {}))} proxy configurations")
+
         except Exception as e:
             logger.error(f"Failed to load proxy state: {e}")
+
+    async def _restore_proxy_with_retry(
+        self,
+        multipass_name: str,
+        vm_ip: str,
+        port: int,
+        max_retries: int = 3,
+        initial_delay: float = 1.0
+    ) -> bool:
+        """Attempt to restore a proxy with exponential backoff retry.
+
+        Args:
+            multipass_name: Multipass VM name
+            vm_ip: VM IP address
+            port: Port to use
+            max_retries: Maximum number of retry attempts
+            initial_delay: Initial delay between retries (doubles each attempt)
+
+        Returns:
+            bool: True if restoration was successful
+        """
+        delay = initial_delay
+        for attempt in range(max_retries):
+            try:
+                if attempt > 0:
+                    logger.info(f"Retry attempt {attempt + 1} for {multipass_name} on port {port}")
+                    await asyncio.sleep(delay)
+                    delay *= 2  # Exponential backoff
+
+                # Attempt to create proxy
+                proxy = ProxyServer(port, vm_ip)
+                await proxy.start()
+
+                self._proxies[multipass_name] = proxy
+                logger.info(f"Successfully restored proxy for {multipass_name} on port {port}")
+                return True
+
+            except Exception as e:
+                logger.warning(f"Attempt {attempt + 1} failed for {multipass_name}: {e}")
+                if attempt == max_retries - 1:
+                    logger.error(f"Failed to restore proxy for {multipass_name} after {max_retries} attempts")
+                    # Remove from active ports if all retries failed
+                    self._active_ports.pop(multipass_name, None)
+        return False
 
-    def _save_state(self) -> None:
-        """Save current proxy state to file."""
+    async def _save_state(self) -> None:
+        """Save current proxy state to file using requestor names."""
         try:
             state = {
-                vm_id: {
-                    'port': proxy.listen_port,
-                    'target': proxy.target_host
-                }
-                for vm_id, proxy in self._proxies.items()
+                'version': self._state_version,
+                'proxies': {}
             }
+
+            for multipass_name, proxy in self._proxies.items():
+                requestor_name = await self.name_mapper.get_requestor_name(multipass_name)
+                if requestor_name:
+                    state['proxies'][requestor_name] = {
+                        'port': proxy.listen_port,
+                        'target': proxy.target_host
+                    }
+
+            # Save to temporary file first
+            temp_file = f"{self.state_file}.tmp"
             os.makedirs(os.path.dirname(self.state_file), exist_ok=True)
-            with open(self.state_file, 'w') as f:
-                json.dump(state, f)
+
+            with open(temp_file, 'w') as f:
+                json.dump(state, f, indent=2)
+
+            # Atomic rename
+            os.replace(temp_file, self.state_file)
+
         except Exception as e:
             logger.error(f"Failed to save proxy state: {e}")
@@ -206,7 +292,7 @@ class PythonProxyManager:
         """Add proxy configuration for a new VM.
 
         Args:
-            vm_id: Unique identifier for the VM
+            vm_id: Unique identifier for the VM (multipass name)
             vm_ip: IP address of the VM
             port: Optional specific port to use, if not provided one will be allocated
@@ -226,7 +312,7 @@ class PythonProxyManager:
             await proxy.start()
 
             self._proxies[vm_id] = proxy
-            self._save_state()
+            await self._save_state()
 
             logger.info(f"Started proxy for VM {vm_id} on port {port}")
             return True
@@ -242,14 +328,14 @@ class PythonProxyManager:
         """Remove proxy configuration for a VM.
 
         Args:
-            vm_id: Unique identifier for the VM
+            vm_id: Unique identifier for the VM (multipass name)
         """
         try:
             if vm_id in self._proxies:
                 proxy = self._proxies.pop(vm_id)
                 await proxy.stop()
                 self.port_manager.deallocate_port(vm_id)
-                self._save_state()
+                await self._save_state()
                 logger.info(f"Removed proxy for VM {vm_id}")
         except Exception as e:
             logger.error(f"Failed to remove proxy for VM {vm_id}: {e}")
@@ -270,7 +356,7 @@ class PythonProxyManager:
                 cleanup_errors.append(f"Failed to remove proxy for VM {vm_id}: {e}")
 
         try:
-            self._save_state()
+            await self._save_state()
         except Exception as e:
             cleanup_errors.append(f"Failed to save state: {e}")
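Finally, every _save_state call site is now awaited, and the save itself uses the classic write-then-rename pattern: serialize to <state_file>.tmp, then os.replace onto the real path, so a crash mid-write can never leave a truncated proxy_state.json. A generic sketch of the pattern, with the state shape matching the diff's version-1 schema (the paths and VM name are illustrative):

```python
import json
import os

def save_state_atomically(state_file: str, state: dict) -> None:
    temp_file = f"{state_file}.tmp"
    os.makedirs(os.path.dirname(state_file) or ".", exist_ok=True)
    with open(temp_file, "w") as f:
        json.dump(state, f, indent=2)
    # os.replace is atomic on POSIX and Windows: readers see either the old
    # complete file or the new complete file, never a partial write.
    os.replace(temp_file, state_file)

save_state_atomically(
    "demo_state/proxy_state.json",
    {"version": 1, "proxies": {"my-vm": {"port": 50800, "target": "10.0.0.5"}}},
)
```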