golem-vm-provider 0.1.54__py3-none-any.whl → 0.1.55__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {golem_vm_provider-0.1.54.dist-info → golem_vm_provider-0.1.55.dist-info}/METADATA +4 -1
- {golem_vm_provider-0.1.54.dist-info → golem_vm_provider-0.1.55.dist-info}/RECORD +6 -6
- provider/payments/monitor.py +27 -7
- provider/service.py +62 -0
- {golem_vm_provider-0.1.54.dist-info → golem_vm_provider-0.1.55.dist-info}/WHEEL +0 -0
- {golem_vm_provider-0.1.54.dist-info → golem_vm_provider-0.1.55.dist-info}/entry_points.txt +0 -0
{golem_vm_provider-0.1.54.dist-info → golem_vm_provider-0.1.55.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: golem-vm-provider
-Version: 0.1.54
+Version: 0.1.55
 Summary: VM on Golem Provider Node - Run your own provider node to offer VMs on the Golem Network
 Keywords: golem,vm,provider,cloud,decentralized
 Author: Phillip Jensen
@@ -510,6 +510,7 @@ sequenceDiagram
     participant DS as Discovery Service
 
     P->>RT: Initialize
+    P->>RT: Sync with existing VMs
     RT->>AD: Register Callback
     loop Every 4 minutes
         AD->>RT: Get Resources
@@ -519,6 +520,8 @@ sequenceDiagram
     end
 ```
 
+On startup, the provider syncs the resource tracker with all VMs currently running on the host (via Multipass). This ensures advertisements reflect already-allocated CPU, RAM, and storage after restarts, preventing false “outdated advertisement” updates when existing VMs are consuming resources. The sync is based on actual VMs present, independent of any still-open payment streams.
+
 ### Monitoring
 
 The provider includes comprehensive logging:
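The paragraph added to the package description above is backed by new call sites in provider/service.py later in this diff (get_all_vms_resources() followed by resource_tracker.sync_with_multipass()). The resource tracker itself is not part of this diff, so the following is only a minimal sketch, assuming a simplified tracker that keeps per-VM allocations and recomputes availability from them; the class and field names here are illustrative, not the package's real API.

```python
# Illustrative sketch only: the real ResourceTracker/VMResources classes are
# not shown in this diff; the names and fields below are assumptions.
from dataclasses import dataclass, field
from typing import Dict


@dataclass
class VMResources:
    cpu: int         # vCPUs
    memory_gb: int   # RAM in GB
    storage_gb: int  # disk in GB


@dataclass
class ResourceTracker:
    total: VMResources
    allocated: Dict[str, VMResources] = field(default_factory=dict)

    async def sync_with_multipass(self, vm_resources: Dict[str, VMResources]) -> None:
        # Replace the allocation table with what is actually running on the
        # host, regardless of any payment-stream state.
        self.allocated = dict(vm_resources)

    def available(self) -> VMResources:
        # Advertised capacity = total minus everything currently allocated.
        return VMResources(
            cpu=self.total.cpu - sum(r.cpu for r in self.allocated.values()),
            memory_gb=self.total.memory_gb - sum(r.memory_gb for r in self.allocated.values()),
            storage_gb=self.total.storage_gb - sum(r.storage_gb for r in self.allocated.values()),
        )
```

Under that assumption, calling sync_with_multipass() with the per-VM resources reported at startup makes available() reflect reality before the first advertisement is sent, which is the behavior the description above promises.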
{golem_vm_provider-0.1.54.dist-info → golem_vm_provider-0.1.55.dist-info}/RECORD
CHANGED
@@ -16,12 +16,12 @@ provider/discovery/service.py,sha256=vX_mVSxvn3arnb2cKDM_SeJp1ZgPdImP2aUubeXgdRg
 provider/main.py,sha256=_j92g56B-d8CE09Ugv0fqWVMi5jw_iuTrysxSw7845A,32309
 provider/network/port_verifier.py,sha256=3l6WNwBHydggJRFYkAsuBp1eCxaU619kjWuM-zSVj2o,13267
 provider/payments/blockchain_service.py,sha256=4GrzDKwCSUVoENqjD4RLyJ0qwBOJKMyVk5Li-XNsyTc,3567
-provider/payments/monitor.py,sha256=
+provider/payments/monitor.py,sha256=seo8vE622IdbcRE3x69IpvHn2mel_tlMNGt_DxOIoww,5386
 provider/payments/stream_map.py,sha256=qk6Y8hS72DplAifZ0ZMWPHBAyc_3IWIQyWUBuCU3_To,1191
 provider/security/ethereum.py,sha256=EwPZj4JR8OEpto6LhKjuuT3Z9pBX6P7-UQaqJtqFkYQ,1242
 provider/security/faucet.py,sha256=8T4lW1fVQgUk8EQILgbrr9UUosw9e7eA40tlZ2_KCPQ,4368
 provider/security/l2_faucet.py,sha256=yRV4xdPBgU8-LDTLqtuAijfgIoe2kYxvXqJLxFd-BVI,2662
-provider/service.py,sha256=
+provider/service.py,sha256=hlQn0woppsYFHZDMEgq-40cOjmiPWruiWLy_dQvaCRU,6859
 provider/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 provider/utils/ascii_art.py,sha256=ykBFsztk57GIiz1NJ-EII5UvN74iECqQL4h9VmiW6Z8,3161
 provider/utils/logging.py,sha256=VV3oTYSRT8hUejtXLuua1M6kCHmIJgPspIkzsUVhYW0,1920
@@ -39,7 +39,7 @@ provider/vm/port_manager.py,sha256=iYSwjTjD_ziOhG8aI7juKHw1OwwRUTJQyQoRUNQvz9w,1
 provider/vm/provider.py,sha256=A7QN89EJjcSS40_SmKeinG1Jp_NGffJaLse-XdKciAs,1164
 provider/vm/proxy_manager.py,sha256=n4NTsyz2rtrvjtf_ceKBk-g2q_mzqPwruB1q7UlQVBc,14928
 provider/vm/service.py,sha256=Ki4SGNIZUq3XmaPMwAOoNzdZzKQsmFXid374wgjFPes,4636
-golem_vm_provider-0.1.
-golem_vm_provider-0.1.
-golem_vm_provider-0.1.
-golem_vm_provider-0.1.
+golem_vm_provider-0.1.55.dist-info/METADATA,sha256=_GZ2hyX-aeTtK--VOiSE4tZfAnQwANw21tO63EvOskY,18877
+golem_vm_provider-0.1.55.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+golem_vm_provider-0.1.55.dist-info/entry_points.txt,sha256=5Jiie1dIXygmxmDW66bKKxQpmBLJ7leSKRrb8bkQALw,52
+golem_vm_provider-0.1.55.dist-info/RECORD,,
provider/payments/monitor.py
CHANGED
@@ -56,21 +56,41 @@ class StreamMonitor:
                 logger.warning(f"stream {stream_id} lookup failed: {e}")
                 continue
             # Stop VM if remaining runway < threshold
-            remaining = max(s["stopTime"] - now, 0)
+            remaining = max(int(s["stopTime"]) - int(now), 0)
             logger.debug(
                 f"stream {stream_id} for VM {vm_id}: start={s['startTime']} stop={s['stopTime']} "
                 f"rate={s['ratePerSecond']} withdrawn={s['withdrawn']} halted={s['halted']} remaining={remaining}s"
             )
-
-
+            # If stream is force-halted, delete immediately to free all resources
+            if bool(s.get("halted")):
+                logger.info(
+                    f"Deleting VM {vm_id} due to halted stream (id={stream_id}, now={now})"
+                )
+                try:
+                    await self.vm_service.delete_vm(vm_id)
+                except Exception as e:
+                    logger.warning(f"delete_vm failed for {vm_id}: {e}")
+                try:
+                    await self.stream_map.remove(vm_id)
+                except Exception as e:
+                    logger.debug(f"failed to remove vm {vm_id} from stream map: {e}")
+                continue
+
+            # Only stop a VM when runway is completely empty
+            if remaining == 0:
+                logger.info(
+                    f"Stopping VM {vm_id} as stream runway is exhausted (id={stream_id}, now={now}, stop={s.get('stopTime')})"
+                )
                 try:
                     await self.vm_service.stop_vm(vm_id)
                 except Exception as e:
                     logger.warning(f"stop_vm failed for {vm_id}: {e}")
-
-
-
-
+                continue
+
+            # Otherwise, do not stop; just log health and consider withdrawals
+            logger.debug(
+                f"VM {vm_id} stream {stream_id} healthy (remaining={remaining}s)"
+            )
             # Withdraw if enough vested and configured
             if self._get("STREAM_WITHDRAW_ENABLED", False) and self.client:
                 vested = max(min(now, s["stopTime"]) - s["startTime"], 0) * s["ratePerSecond"]
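The new monitor logic above checks each stream in a fixed order: a halted stream deletes the VM immediately, an exhausted runway (remaining == 0) only stops it, and otherwise the stream is logged as healthy before the optional withdrawal step. That step relies on the vesting arithmetic visible at the end of the hunk; the helper below is not part of the package, just a standalone restatement of that arithmetic using the same stream fields (startTime, stopTime, ratePerSecond, withdrawn).

```python
# Not from the package: a worked restatement of StreamMonitor's runway/vesting
# math, using the same stream dict fields as the diff above.
def stream_accounting(s: dict, now: int) -> dict:
    start = int(s["startTime"])
    stop = int(s["stopTime"])
    rate = int(s["ratePerSecond"])    # smallest token unit per second
    withdrawn = int(s["withdrawn"])   # amount already withdrawn

    remaining = max(stop - int(now), 0)                  # seconds of runway left
    vested = max(min(int(now), stop) - start, 0) * rate  # earned so far
    withdrawable = max(vested - withdrawn, 0)            # still claimable
    return {"remaining": remaining, "vested": vested, "withdrawable": withdrawable}


# Example: halfway through a one-hour stream paying 10 units/second with
# nothing withdrawn yet -> 1800 s of runway, 18000 units vested and claimable.
print(stream_accounting(
    {"startTime": 0, "stopTime": 3600, "ratePerSecond": 10, "withdrawn": 0},
    now=1800,
))
```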
provider/service.py
CHANGED
@@ -37,6 +37,68 @@ class ProviderService:
         # Initialize services
         await self.port_manager.initialize()
         await self.vm_service.provider.initialize()
+
+        # Before starting advertisement, sync allocated resources with existing VMs
+        try:
+            vm_resources = await self.vm_service.get_all_vms_resources()
+            await self.vm_service.resource_tracker.sync_with_multipass(vm_resources)
+        except Exception as e:
+            logger.warning(f"Failed to sync resources with existing VMs: {e}")
+
+        # Cross-check running VMs against payment streams. If a VM has no
+        # active stream, it is no longer rented: terminate it and free resources.
+        try:
+            # Only perform checks if payments are configured
+            if settings.STREAM_PAYMENT_ADDRESS and not settings.STREAM_PAYMENT_ADDRESS.lower().endswith("0000000000000000000000000000000000000000") and settings.POLYGON_RPC_URL:
+                stream_map = app.container.stream_map()
+                reader = app.container.stream_reader()
+
+                # Use the most recent view of VMs from the previous sync
+                vm_ids = list(vm_resources.keys()) if 'vm_resources' in locals() else []
+                for vm_id in vm_ids:
+                    try:
+                        stream_id = await stream_map.get(vm_id)
+                    except Exception:
+                        stream_id = None
+
+                    if stream_id is None:
+                        reason = "no stream mapped"
+                        should_terminate = True
+                    else:
+                        try:
+                            ok, msg = reader.verify_stream(int(stream_id), settings.PROVIDER_ID)
+                            should_terminate = not ok
+                            reason = msg if not ok else "ok"
+                        except Exception as e:
+                            # If verification cannot be performed, be conservative and keep the VM
+                            logger.warning(f"Stream verification error for VM {vm_id} (stream {stream_id}): {e}")
+                            should_terminate = False
+                            reason = f"verification error: {e}"
+
+                    if should_terminate:
+                        logger.info(
+                            f"Deleting VM {vm_id}: inactive stream (stream_id={stream_id}, reason={reason})"
+                        )
+                        try:
+                            await self.vm_service.delete_vm(vm_id)
+                        except Exception as e:
+                            logger.warning(f"Failed to delete VM {vm_id}: {e}")
+                        try:
+                            await stream_map.remove(vm_id)
+                        except Exception:
+                            pass
+
+                # Re-sync after any terminations to ensure ads reflect capacity
+                try:
+                    vm_resources = await self.vm_service.get_all_vms_resources()
+                    await self.vm_service.resource_tracker.sync_with_multipass(vm_resources)
+                except Exception as e:
+                    logger.warning(f"Post-termination resource sync failed: {e}")
+            else:
+                logger.info("Payments not configured; skipping startup stream checks")
+        except Exception as e:
+            logger.warning(f"Failed to reconcile VMs with payment streams: {e}")
+
         await self.advertisement_service.start()
         # Start pricing auto-updater; trigger re-advertise after updates
         async def _on_price_updated(platform: str, glm_usd):
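One detail in the startup reconciliation above: payments count as configured only when STREAM_PAYMENT_ADDRESS is set, is not the zero address (the endswith("0000…0000") check covers both 0x-prefixed and bare forms), and POLYGON_RPC_URL is present; on a verification error the VM is deliberately kept rather than deleted. A small standalone illustration of that guard follows; the helper name is ours, not the package's.

```python
# Illustration only (not part of the package) of the zero-address guard used
# in ProviderService above.
ZERO_SUFFIX = "0000000000000000000000000000000000000000"

def payments_configured(stream_payment_address: str, rpc_url: str) -> bool:
    # A set, non-zero contract address plus an RPC URL means the startup
    # stream checks should run.
    return bool(
        stream_payment_address
        and not stream_payment_address.lower().endswith(ZERO_SUFFIX)
        and rpc_url
    )

assert payments_configured("0x" + "ab" * 20, "https://polygon-rpc.com")
assert not payments_configured("0x" + "0" * 40, "https://polygon-rpc.com")  # zero address
assert not payments_configured("", "https://polygon-rpc.com")               # unset address
```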
{golem_vm_provider-0.1.54.dist-info → golem_vm_provider-0.1.55.dist-info}/WHEEL
File without changes
{golem_vm_provider-0.1.54.dist-info → golem_vm_provider-0.1.55.dist-info}/entry_points.txt
File without changes