nexaroa 0.0.111__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuroshard/__init__.py +93 -0
- neuroshard/__main__.py +4 -0
- neuroshard/cli.py +466 -0
- neuroshard/core/__init__.py +92 -0
- neuroshard/core/consensus/verifier.py +252 -0
- neuroshard/core/crypto/__init__.py +20 -0
- neuroshard/core/crypto/ecdsa.py +392 -0
- neuroshard/core/economics/__init__.py +52 -0
- neuroshard/core/economics/constants.py +387 -0
- neuroshard/core/economics/ledger.py +2111 -0
- neuroshard/core/economics/market.py +975 -0
- neuroshard/core/economics/wallet.py +168 -0
- neuroshard/core/governance/__init__.py +74 -0
- neuroshard/core/governance/proposal.py +561 -0
- neuroshard/core/governance/registry.py +545 -0
- neuroshard/core/governance/versioning.py +332 -0
- neuroshard/core/governance/voting.py +453 -0
- neuroshard/core/model/__init__.py +30 -0
- neuroshard/core/model/dynamic.py +4186 -0
- neuroshard/core/model/llm.py +905 -0
- neuroshard/core/model/registry.py +164 -0
- neuroshard/core/model/scaler.py +387 -0
- neuroshard/core/model/tokenizer.py +568 -0
- neuroshard/core/network/__init__.py +56 -0
- neuroshard/core/network/connection_pool.py +72 -0
- neuroshard/core/network/dht.py +130 -0
- neuroshard/core/network/dht_plan.py +55 -0
- neuroshard/core/network/dht_proof_store.py +516 -0
- neuroshard/core/network/dht_protocol.py +261 -0
- neuroshard/core/network/dht_service.py +506 -0
- neuroshard/core/network/encrypted_channel.py +141 -0
- neuroshard/core/network/nat.py +201 -0
- neuroshard/core/network/nat_traversal.py +695 -0
- neuroshard/core/network/p2p.py +929 -0
- neuroshard/core/network/p2p_data.py +150 -0
- neuroshard/core/swarm/__init__.py +106 -0
- neuroshard/core/swarm/aggregation.py +729 -0
- neuroshard/core/swarm/buffers.py +643 -0
- neuroshard/core/swarm/checkpoint.py +709 -0
- neuroshard/core/swarm/compute.py +624 -0
- neuroshard/core/swarm/diloco.py +844 -0
- neuroshard/core/swarm/factory.py +1288 -0
- neuroshard/core/swarm/heartbeat.py +669 -0
- neuroshard/core/swarm/logger.py +487 -0
- neuroshard/core/swarm/router.py +658 -0
- neuroshard/core/swarm/service.py +640 -0
- neuroshard/core/training/__init__.py +29 -0
- neuroshard/core/training/checkpoint.py +600 -0
- neuroshard/core/training/distributed.py +1602 -0
- neuroshard/core/training/global_tracker.py +617 -0
- neuroshard/core/training/production.py +276 -0
- neuroshard/governance_cli.py +729 -0
- neuroshard/grpc_server.py +895 -0
- neuroshard/runner.py +3223 -0
- neuroshard/sdk/__init__.py +92 -0
- neuroshard/sdk/client.py +990 -0
- neuroshard/sdk/errors.py +101 -0
- neuroshard/sdk/types.py +282 -0
- neuroshard/tracker/__init__.py +0 -0
- neuroshard/tracker/server.py +864 -0
- neuroshard/ui/__init__.py +0 -0
- neuroshard/ui/app.py +102 -0
- neuroshard/ui/templates/index.html +1052 -0
- neuroshard/utils/__init__.py +0 -0
- neuroshard/utils/autostart.py +81 -0
- neuroshard/utils/hardware.py +121 -0
- neuroshard/utils/serialization.py +90 -0
- neuroshard/version.py +1 -0
- nexaroa-0.0.111.dist-info/METADATA +283 -0
- nexaroa-0.0.111.dist-info/RECORD +78 -0
- nexaroa-0.0.111.dist-info/WHEEL +5 -0
- nexaroa-0.0.111.dist-info/entry_points.txt +4 -0
- nexaroa-0.0.111.dist-info/licenses/LICENSE +190 -0
- nexaroa-0.0.111.dist-info/top_level.txt +2 -0
- protos/__init__.py +0 -0
- protos/neuroshard.proto +651 -0
- protos/neuroshard_pb2.py +160 -0
- protos/neuroshard_pb2_grpc.py +1298 -0
|
@@ -0,0 +1,506 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import time
|
|
3
|
+
import threading
|
|
4
|
+
from typing import List, Optional
|
|
5
|
+
|
|
6
|
+
from protos import neuroshard_pb2
|
|
7
|
+
from protos import neuroshard_pb2_grpc
|
|
8
|
+
from neuroshard.core.network.dht import Node, RoutingTable, KBucket
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
def proto_to_node(proto_node: neuroshard_pb2.DHTNodeInfo) -> Node:
    """Translate a DHTNodeInfo protobuf message into a routing-table Node.

    The wire format carries the Kademlia node ID as big-endian bytes; it is
    decoded back into a Python int before constructing the Node.
    """
    decoded_id = int.from_bytes(proto_node.node_id, byteorder='big')
    return Node(decoded_id, proto_node.ip, proto_node.port)
|
|
16
|
+
|
|
17
|
+
def node_to_proto(node: Node) -> neuroshard_pb2.DHTNodeInfo:
    """Translate a routing-table Node into a DHTNodeInfo protobuf message.

    Kademlia uses fixed-width 160-bit (20-byte) big-endian node IDs on the
    wire. The previous fallback serialized an oversized ID at its natural
    byte length, producing a variable-width ID that breaks fixed-width
    XOR-distance comparisons on the receiving side. Oversized IDs are now
    reduced to their low 160 bits so the encoded ID is always exactly
    20 bytes.
    """
    try:
        id_bytes = node.id.to_bytes(20, byteorder='big')
    except OverflowError:
        # ID wider than 160 bits (should not happen in practice): keep only
        # the low 160 bits so the wire format stays a fixed 20 bytes.
        id_bytes = (node.id & ((1 << 160) - 1)).to_bytes(20, byteorder='big')

    return neuroshard_pb2.DHTNodeInfo(
        node_id=id_bytes,
        ip=node.ip,
        port=node.port
    )
|
|
33
|
+
|
|
34
|
+
class DHTServiceMixin:
    """
    Mixin to add DHT capabilities to the main NeuroShardServiceServicer.
    We use a mixin to keep the code clean and separated.

    Handlers fall into two groups:
    - Kademlia RPCs (DHTPing / DHTStore / DHTFindNode / DHTFindValue) that
      maintain the routing table and a simple key/value store.
    - Gossip / validation RPCs (GossipProof, GossipTransaction, GossipStake,
      RequestProofValidation, GossipValidationVote) that feed the economics
      ledger, when one is attached.
    """
    def __init__(self, routing_table: RoutingTable, storage: dict, ledger: Optional[object] = None):
        # Local Kademlia routing state (buckets of known peers).
        self.routing_table = routing_table
        self.storage = storage  # Simple in-memory dict for now (key int -> JSON string)
        self.ledger = ledger  # LedgerManager instance; None disables all gossip handlers

    def _update_routing(self, sender_proto: neuroshard_pb2.DHTNodeInfo):
        """Update routing table with the sender's info.

        Called on every incoming DHT RPC so that active peers are refreshed
        in their k-buckets (standard Kademlia behavior).
        """
        node = proto_to_node(sender_proto)
        self.routing_table.add_contact(node)

    def DHTPing(self, request, context):
        """Liveness probe: record the sender and identify ourselves back."""
        self._update_routing(request.sender)

        return neuroshard_pb2.DHTPingResponse(
            responder=node_to_proto(self.routing_table.local_node)
        )

    def DHTStore(self, request, context):
        """Store a value under a key.

        Values are accumulated as a JSON list per key (append-only set
        semantics, deduplicated, capped at the 50 most recent entries)
        rather than overwritten, so multiple providers can register under
        the same key.
        """
        self._update_routing(request.sender)

        key_int = int.from_bytes(request.key, byteorder='big')
        new_val = request.value

        import json
        current_val = self.storage.get(key_int)

        stored_list = []
        if current_val:
            try:
                # Try to parse as list
                stored_list = json.loads(current_val)
                if not isinstance(stored_list, list):
                    stored_list = [current_val]
            except:
                # Was a simple string (legacy/plain value) - wrap it
                stored_list = [current_val]

        # Add new value if not present
        if new_val not in stored_list:
            stored_list.append(new_val)

        # Limit size (keep most recent 50)
        if len(stored_list) > 50:
            stored_list = stored_list[-50:]

        self.storage[key_int] = json.dumps(stored_list)

        return neuroshard_pb2.DHTStoreResponse(
            responder=node_to_proto(self.routing_table.local_node),
            success=True
        )

    def DHTFindNode(self, request, context):
        """Return the k closest known nodes to the requested target ID."""
        self._update_routing(request.sender)

        target_id = int.from_bytes(request.target_id, byteorder='big')
        closest_nodes = self.routing_table.find_closest(target_id)

        return neuroshard_pb2.DHTFindNodeResponse(
            responder=node_to_proto(self.routing_table.local_node),
            nodes=[node_to_proto(n) for n in closest_nodes]
        )

    def DHTFindValue(self, request, context):
        """Look up a key locally; on a miss, return the closest nodes instead.

        This is the standard Kademlia FIND_VALUE: found=True carries the
        value, found=False carries routing hints for the caller to recurse.
        """
        self._update_routing(request.sender)

        key_int = int.from_bytes(request.key, byteorder='big')

        if key_int in self.storage:
            return neuroshard_pb2.DHTFindValueResponse(
                responder=node_to_proto(self.routing_table.local_node),
                value=self.storage[key_int],
                found=True
            )
        else:
            # Return closest nodes so the caller can continue the lookup
            closest_nodes = self.routing_table.find_closest(key_int)
            return neuroshard_pb2.DHTFindValueResponse(
                responder=node_to_proto(self.routing_table.local_node),
                nodes=[node_to_proto(n) for n in closest_nodes],
                found=False
            )

    def GossipProof(self, request, context):
        """
        Handle incoming gossip Proof of Neural Work (PoNW).

        Security Model:
        ===============
        1. Proof must have valid signature format (64 hex chars)
        2. Proof must have valid node_id format (32 hex or legacy decimal)
        3. Proof must pass timestamp freshness check (< 5 min old)
        4. Proof must not be a replay (signature not seen before)
        5. Proof must pass rate limiting (per-node limits)
        6. Proof must pass plausibility checks (realistic work claims)

        All these checks happen in ledger.process_proof().
        """
        if not self.ledger:
            return neuroshard_pb2.GossipProofResponse(accepted=False)

        from neuroshard.core.economics.ledger import PoNWProof, ProofType

        try:
            # Determine proof type from request. Explicit proof_type wins;
            # otherwise infer from which work counters are non-zero.
            proof_type_str = getattr(request, 'proof_type', '')
            if proof_type_str:
                proof_type = proof_type_str
            elif getattr(request, 'training_batches', 0) > 0:
                proof_type = ProofType.TRAINING.value
            elif getattr(request, 'token_count', 0) > 0:
                proof_type = ProofType.INFERENCE.value
            else:
                proof_type = ProofType.UPTIME.value

            # CRITICAL: Store public key for trustless verification
            # This allows nodes to verify proofs from previously unknown peers
            public_key = getattr(request, 'public_key', None)
            if public_key and self.ledger and self.ledger.crypto:
                stored = self.ledger.crypto.store_public_key(request.node_id, public_key)
                logger.debug(f"Gossip: Stored public key for {request.node_id[:16]}...: {stored}")
            else:
                logger.debug(f"Gossip: No public key provided by {request.node_id[:16]}...")

            # Reconstruct full PoNWProof from gRPC request
            # CRITICAL: Include data_samples, model_hash, request_id for canonical_payload match
            # Extract current_loss (0.0 means "not set" - convert to None)
            loss_val = getattr(request, 'current_loss', 0.0)
            current_loss = loss_val if loss_val > 0.0 else None

            proof = PoNWProof(
                node_id=request.node_id,
                proof_type=proof_type,
                timestamp=request.timestamp,
                # Fall back to a signature-derived nonce so legacy senders
                # without a nonce field still produce a stable identity.
                nonce=getattr(request, 'nonce', '') or f"gossip_{request.signature[:8] if request.signature else 'none'}",
                uptime_seconds=request.uptime,
                tokens_processed=getattr(request, 'token_count', 0),
                training_batches=getattr(request, 'training_batches', 0),
                data_samples=getattr(request, 'data_samples', 0),
                model_hash=getattr(request, 'model_hash', ''),
                request_id=getattr(request, 'request_id', '') or None,
                layers_held=getattr(request, 'layers_held', 0),
                has_embedding=getattr(request, 'has_embedding', False),
                has_lm_head=getattr(request, 'has_lm_head', False),
                signature=request.signature,
                current_loss=current_loss
            )

            # Validate and credit - ALL security checks happen here
            success, reward, msg = self.ledger.process_proof(proof)

            if success:
                logger.info(f"Gossip: Accepted PoNW from {request.node_id[:16]}... "
                            f"(type={proof_type}, tokens={proof.tokens_processed}, "
                            f"batches={proof.training_batches}, reward={reward:.6f} NEURO)")
            else:
                # Common benign rejections in gossip networks - log as debug to reduce noise
                # - Duplicates: Normal propagation
                # - Proof too old: Syncing nodes or slow propagation
                # - Rate limits: Peer sending too fast (dropped)
                if any(x in msg for x in ["Duplicate", "Proof too old", "Rate limit"]):
                    logger.debug(f"Gossip: Ignored PoNW from {request.node_id[:16]}... ({msg})")
                else:
                    logger.warning(f"❌ Gossip: Rejected PoNW from {request.node_id[:16]}...: {msg}")

            return neuroshard_pb2.GossipProofResponse(accepted=success)
        except Exception as e:
            logger.error(f"Gossip processing error: {e}")
            return neuroshard_pb2.GossipProofResponse(accepted=False)

    def GossipTransaction(self, request, context):
        """Handle incoming gossip transaction (P2P Transfer).

        NOTE(review): the canonical payload below is computed but never
        verified here - actual replay/balance enforcement is delegated to
        ledger.create_transaction(), and the signature is only stored for
        future audit/slashing (see comments inline).
        """
        if not self.ledger:
            return neuroshard_pb2.GossipTransactionResponse(accepted=False, reason="No ledger")

        try:
            # Signature Verification
            # In this production implementation, we verify the signature against the transaction data
            # This prevents tampering and replay attacks
            import hashlib

            # Reconstruct the signed payload (Canonical String)
            # Format: sender_id:recipient_id:amount:timestamp
            payload = f"{request.sender_id}:{request.recipient_id}:{request.amount}:{request.timestamp}"

            # Currently using HMAC-SHA256 where node_token acts as the private key
            # To verify a peer's signature without their private key requires Public Key Crypto (RSA/ECDSA).
            # Since we haven't distributed Public Keys in the DHT yet, we rely on the Ledger to detect replay/balances.
            # The signature is stored for future audit/slashing.

            success = self.ledger.create_transaction(
                from_id=request.sender_id,
                to_id=request.recipient_id,
                amount=request.amount,
                signature=request.signature
            )

            if success:
                logger.info(f"Gossip: Accepted Transaction {request.amount} NEURO from {request.sender_id} -> {request.recipient_id}")
                return neuroshard_pb2.GossipTransactionResponse(accepted=True)
            else:
                logger.warning(f"❌ Gossip: Rejected Transaction {request.sender_id} -> {request.recipient_id}")
                return neuroshard_pb2.GossipTransactionResponse(accepted=False, reason="Validation failed")

        except Exception as e:
            logger.error(f"Gossip Transaction error: {e}")
            return neuroshard_pb2.GossipTransactionResponse(accepted=False, reason=str(e))

    def GossipStake(self, request, context):
        """
        Handle incoming stake gossip from peers.

        This allows the network to have a consistent view of stakes across nodes.
        Stakes are used to calculate reward multipliers and validate PoNW claims.

        ECDSA signatures enable trustless verification of stake claims.
        """
        if not self.ledger:
            return neuroshard_pb2.GossipStakeResponse(accepted=False, reason="No ledger")

        try:
            import time
            from neuroshard.core.crypto.ecdsa import verify_signature, is_valid_node_id_format

            # Basic validation
            if not request.node_id or request.amount < 0:
                return neuroshard_pb2.GossipStakeResponse(accepted=False, reason="Invalid stake data")

            # Validate node_id format (32 hex chars)
            if not is_valid_node_id_format(request.node_id):
                return neuroshard_pb2.GossipStakeResponse(accepted=False, reason="Invalid node_id format")

            # Timestamp freshness check (5 minute window)
            if abs(time.time() - request.timestamp) > 300:
                return neuroshard_pb2.GossipStakeResponse(accepted=False, reason="Stale stake update")

            # Verify ECDSA signature
            # SECURITY: Public key is REQUIRED for verification
            if not request.public_key:
                logger.warning(f"Gossip: Missing public key from {request.node_id[:16]}...")
                return neuroshard_pb2.GossipStakeResponse(accepted=False, reason="Missing public key")

            # Canonical signed payload: node_id:amount:locked_until
            payload = f"{request.node_id}:{request.amount}:{request.locked_until}"
            if not verify_signature(request.node_id, payload, request.signature, request.public_key):
                logger.warning(f"Gossip: Invalid stake signature from {request.node_id[:16]}...")
                return neuroshard_pb2.GossipStakeResponse(accepted=False, reason="Invalid signature")

            # Update our local view of this node's stake
            success = self.ledger.update_stake(
                node_id=request.node_id,
                amount=request.amount,
                locked_until=request.locked_until
            )

            if success:
                logger.info(f"Gossip: Updated stake for {request.node_id[:16]}... = {request.amount:.2f} NEURO")
                return neuroshard_pb2.GossipStakeResponse(accepted=True)
            else:
                return neuroshard_pb2.GossipStakeResponse(accepted=False, reason="Stake update failed")

        except Exception as e:
            logger.error(f"Gossip Stake error: {e}")
            return neuroshard_pb2.GossipStakeResponse(accepted=False, reason=str(e))

    def RequestProofValidation(self, request, context):
        """
        Handle proof validation requests.

        When a node submits a proof, it can request validation from validators.
        Validators check the proof and cast their vote.
        """
        if not self.ledger:
            return neuroshard_pb2.ProofValidationResponse(
                accepted=False,
                reason="No ledger available"
            )

        try:
            from neuroshard.core.economics.ledger import PoNWProof, ProofType

            # Check if we're eligible to validate
            eligible, reason = self.ledger.is_eligible_validator()
            if not eligible:
                return neuroshard_pb2.ProofValidationResponse(
                    accepted=False,
                    reason=f"Not eligible validator: {reason}"
                )

            # Reconstruct the proof from the request fields
            proof = PoNWProof(
                node_id=request.submitter_id,
                proof_type=request.proof_type,
                timestamp=request.timestamp,
                nonce=request.nonce,
                uptime_seconds=request.uptime_seconds,
                tokens_processed=request.tokens_processed,
                training_batches=request.training_batches,
                layers_held=request.layers_held,
                has_embedding=request.has_embedding,
                has_lm_head=request.has_lm_head,
                signature=request.proof_signature
            )

            # Validate the proof
            # Check plausibility, rate limits, etc.
            is_valid = self._validate_proof_locally(proof)

            # Cast our vote (the ledger records it and pays the validation fee)
            success, fee, msg = self.ledger.validate_proof_as_validator(
                proof=proof,
                vote=is_valid,
                validation_details=f"Local validation: {'PASS' if is_valid else 'FAIL'}"
            )

            my_stake = self.ledger._get_stake(self.ledger.node_id)

            logger.info(f"Validation: Voted {'VALID' if is_valid else 'INVALID'} on proof from "
                        f"{request.submitter_id[:16]}... (earned {fee:.4f} NEURO)")

            return neuroshard_pb2.ProofValidationResponse(
                accepted=True,
                reason=msg,
                validator_id=self.ledger.node_id,
                validator_stake=my_stake
            )

        except Exception as e:
            logger.error(f"Proof validation error: {e}")
            return neuroshard_pb2.ProofValidationResponse(
                accepted=False,
                reason=str(e)
            )

    def _validate_proof_locally(self, proof) -> bool:
        """
        Perform local validation checks on a proof.

        Returns True if the proof appears legitimate.

        Checks, in order: timestamp freshness, uptime plausibility, token
        throughput plausibility, signature format, and (when a verifier is
        attached to the ledger) work-content verification.
        """
        import time
        from neuroshard.core.economics.ledger import (
            MAX_UPTIME_PER_PROOF,
            MAX_TOKENS_PER_MINUTE,
            PROOF_FRESHNESS_WINDOW
        )

        # 1. Timestamp freshness
        age = abs(time.time() - proof.timestamp)
        if age > PROOF_FRESHNESS_WINDOW:
            logger.debug(f"Proof too old: {age:.0f}s > {PROOF_FRESHNESS_WINDOW}s")
            return False

        # 2. Uptime plausibility
        if proof.uptime_seconds > MAX_UPTIME_PER_PROOF:
            logger.debug(f"Uptime too high: {proof.uptime_seconds}s > {MAX_UPTIME_PER_PROOF}s")
            return False

        # 3. Token rate plausibility (claimed throughput vs. a realistic cap)
        if proof.uptime_seconds > 0:
            tokens_per_minute = (proof.tokens_processed / proof.uptime_seconds) * 60
            if tokens_per_minute > MAX_TOKENS_PER_MINUTE:
                logger.debug(f"Token rate too high: {tokens_per_minute:.0f}/min > {MAX_TOKENS_PER_MINUTE}/min")
                return False

        # 4. Signature format (basic check only; cryptographic verification
        #    is the ledger's responsibility)
        if not proof.signature or len(proof.signature) < 64:
            logger.debug("Invalid signature format")
            return False

        # 5. Work Content Verification (via Ledger's Verifier)
        if hasattr(self, 'ledger') and self.ledger and hasattr(self.ledger, 'verifier'):
            is_work_valid, reason = self.ledger.verifier.verify_work_content(proof)
            if not is_work_valid:
                logger.debug(f"Work content invalid: {reason}")
                return False

        # All checks passed
        return True

    def GossipValidationVote(self, request, context):
        """
        Handle incoming validation votes from other validators.

        This allows validators to share their votes across the network.
        Verified votes are persisted in the local ledger database and the
        current stake-weighted consensus status is returned to the sender.
        """
        if not self.ledger:
            return neuroshard_pb2.ValidationVoteResponse(
                accepted=False,
                reason="No ledger available"
            )

        try:
            from neuroshard.core.crypto.ecdsa import verify_signature, is_valid_node_id_format

            # Validate vote format
            if not is_valid_node_id_format(request.validator_id):
                return neuroshard_pb2.ValidationVoteResponse(
                    accepted=False,
                    reason="Invalid validator_id format"
                )

            # Verify signature over the canonical vote payload
            payload = f"{request.proof_signature}:{request.validator_id}:{request.vote}:{request.timestamp}"
            if not verify_signature(request.validator_id, payload, request.signature):
                return neuroshard_pb2.ValidationVoteResponse(
                    accepted=False,
                    reason="Invalid vote signature"
                )

            # Record the vote in our ledger (under the ledger lock to
            # serialize DB writes)
            with self.ledger.lock:
                import sqlite3
                with sqlite3.connect(self.ledger.db_path, timeout=60.0) as conn:
                    # Ensure table exists
                    conn.execute("""
                        CREATE TABLE IF NOT EXISTS validation_votes (
                            validation_id TEXT PRIMARY KEY,
                            proof_signature TEXT NOT NULL,
                            validator_id TEXT NOT NULL,
                            validator_stake REAL NOT NULL,
                            vote INTEGER NOT NULL,
                            details TEXT,
                            timestamp REAL NOT NULL,
                            fee_earned REAL DEFAULT 0.0,
                            UNIQUE(proof_signature, validator_id)
                        )
                    """)

                    # Insert vote (ignore if already exists)
                    import hashlib
                    validation_id = hashlib.sha256(
                        f"{request.validator_id}:{request.proof_signature}:{request.timestamp}".encode()
                    ).hexdigest()[:32]

                    conn.execute("""
                        INSERT OR IGNORE INTO validation_votes
                        (validation_id, proof_signature, validator_id, validator_stake, vote, details, timestamp)
                        VALUES (?, ?, ?, ?, ?, ?, ?)
                    """, (
                        validation_id,
                        request.proof_signature,
                        request.validator_id,
                        request.validator_stake,
                        1 if request.vote else 0,
                        request.details,
                        request.timestamp
                    ))

            # Get current validation status
            status = self.ledger.get_proof_validation_status(request.proof_signature)

            logger.debug(f"Received validation vote from {request.validator_id[:16]}... "
                         f"({'VALID' if request.vote else 'INVALID'})")

            return neuroshard_pb2.ValidationVoteResponse(
                accepted=True,
                total_valid_stake=status["valid_stake"],
                total_invalid_stake=status["invalid_stake"],
                consensus_reached=status["consensus_reached"],
                consensus_result=status.get("consensus_result", False)
            )

        except Exception as e:
            logger.error(f"Gossip validation vote error: {e}")
            return neuroshard_pb2.ValidationVoteResponse(
                accepted=False,
                reason=str(e)
            )
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Encrypted Prompt Channel for Privacy-Preserving Inference
|
|
3
|
+
|
|
4
|
+
Users send encrypted prompts DIRECTLY to driver nodes (not via marketplace).
|
|
5
|
+
This ensures only the chosen driver can read the prompt.
|
|
6
|
+
|
|
7
|
+
For simplicity, we use symmetric encryption (shared secret via request_id).
|
|
8
|
+
In production, use asymmetric (driver's public key).
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import base64
|
|
12
|
+
import hashlib
|
|
13
|
+
import logging
|
|
14
|
+
from typing import Optional, Dict
|
|
15
|
+
from dataclasses import dataclass
|
|
16
|
+
from cryptography.fernet import Fernet
|
|
17
|
+
from cryptography.hazmat.primitives import hashes
|
|
18
|
+
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
|
|
19
|
+
import time
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class EncryptedPrompt:
    """An encrypted prompt sent to a driver node."""
    # Inference request identifier; also the key-derivation input on both
    # sides (see PromptEncryption.derive_key).
    request_id: str
    # Fernet ciphertext of the prompt, base64-encoded for transport.
    encrypted_data: str  # Base64 encoded
    # Creation time (Unix seconds); used by PromptQueue for expiry.
    timestamp: float
    # Identifier of the submitting user.
    user_id: str
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class PromptEncryption:
    """Symmetric encryption helpers that keep prompts private in transit.

    Both sides derive the same Fernet key from the request_id, so only the
    driver handling that request can read the prompt. A production deployment
    would use the driver's public key (asymmetric) instead.
    """

    @staticmethod
    def derive_key(request_id: str, salt: bytes = b'neuroshard_v1') -> bytes:
        """
        Derive encryption key from request_id.

        In production: Use driver's public key (asymmetric encryption).
        For now: Derive symmetric key from request_id (both sides know it).
        """
        derivation = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=100000,
        )
        raw_key = derivation.derive(request_id.encode())
        # Fernet requires a urlsafe-base64-encoded 32-byte key.
        return base64.urlsafe_b64encode(raw_key)

    @staticmethod
    def encrypt_prompt(prompt: str, request_id: str) -> str:
        """
        Encrypt a prompt for sending to driver.

        Args:
            prompt: The plaintext prompt
            request_id: The request ID (used to derive key)

        Returns:
            Base64 encoded encrypted data
        """
        cipher = Fernet(PromptEncryption.derive_key(request_id))
        token = cipher.encrypt(prompt.encode('utf-8'))
        return base64.b64encode(token).decode('utf-8')

    @staticmethod
    def decrypt_prompt(encrypted_data: str, request_id: str) -> str:
        """
        Decrypt a prompt received from user.

        Args:
            encrypted_data: Base64 encoded encrypted prompt
            request_id: The request ID (used to derive key)

        Returns:
            Plaintext prompt
        """
        cipher = Fernet(PromptEncryption.derive_key(request_id))
        token = base64.b64decode(encrypted_data.encode('utf-8'))
        return cipher.decrypt(token).decode('utf-8')
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
class PromptQueue:
    """
    Queue of encrypted prompts awaiting processing by this driver node.

    Prompts arrive directly from users and are keyed by request_id; entries
    expire after `timeout` seconds and are reclaimed lazily.
    """

    def __init__(self, max_size: int = 100):
        # request_id -> EncryptedPrompt
        self.prompts: Dict[str, EncryptedPrompt] = {}
        self.max_size = max_size
        self.timeout = 300  # 5 minutes

    def add_prompt(self, prompt: EncryptedPrompt) -> bool:
        """
        Add encrypted prompt to queue.

        Returns True if added, False if queue full.
        """
        # At capacity: try to reclaim space from expired entries first.
        if len(self.prompts) >= self.max_size:
            self.cleanup_old_prompts()
            if len(self.prompts) >= self.max_size:
                logger.warning(f"Prompt queue full ({self.max_size}), rejecting new prompt")
                return False

        self.prompts[prompt.request_id] = prompt
        logger.info(f"Added encrypted prompt for request {prompt.request_id[:8]}... (queue size: {len(self.prompts)})")
        return True

    def get_prompt(self, request_id: str) -> Optional[EncryptedPrompt]:
        """Get and remove prompt from queue."""
        entry = self.prompts.pop(request_id, None)
        if entry is not None:
            logger.debug(f"Retrieved prompt for request {request_id[:8]}...")
        return entry

    def has_prompt(self, request_id: str) -> bool:
        """Check if prompt exists for request."""
        return request_id in self.prompts

    def cleanup_old_prompts(self):
        """Remove prompts older than timeout."""
        cutoff = time.time() - self.timeout
        expired = [rid for rid, entry in self.prompts.items() if entry.timestamp < cutoff]

        for rid in expired:
            del self.prompts[rid]
            logger.info(f"Removed old prompt for request {rid[:8]}... (timeout)")
|
|
141
|
+
|