nexaroa 0.0.111__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuroshard/__init__.py +93 -0
- neuroshard/__main__.py +4 -0
- neuroshard/cli.py +466 -0
- neuroshard/core/__init__.py +92 -0
- neuroshard/core/consensus/verifier.py +252 -0
- neuroshard/core/crypto/__init__.py +20 -0
- neuroshard/core/crypto/ecdsa.py +392 -0
- neuroshard/core/economics/__init__.py +52 -0
- neuroshard/core/economics/constants.py +387 -0
- neuroshard/core/economics/ledger.py +2111 -0
- neuroshard/core/economics/market.py +975 -0
- neuroshard/core/economics/wallet.py +168 -0
- neuroshard/core/governance/__init__.py +74 -0
- neuroshard/core/governance/proposal.py +561 -0
- neuroshard/core/governance/registry.py +545 -0
- neuroshard/core/governance/versioning.py +332 -0
- neuroshard/core/governance/voting.py +453 -0
- neuroshard/core/model/__init__.py +30 -0
- neuroshard/core/model/dynamic.py +4186 -0
- neuroshard/core/model/llm.py +905 -0
- neuroshard/core/model/registry.py +164 -0
- neuroshard/core/model/scaler.py +387 -0
- neuroshard/core/model/tokenizer.py +568 -0
- neuroshard/core/network/__init__.py +56 -0
- neuroshard/core/network/connection_pool.py +72 -0
- neuroshard/core/network/dht.py +130 -0
- neuroshard/core/network/dht_plan.py +55 -0
- neuroshard/core/network/dht_proof_store.py +516 -0
- neuroshard/core/network/dht_protocol.py +261 -0
- neuroshard/core/network/dht_service.py +506 -0
- neuroshard/core/network/encrypted_channel.py +141 -0
- neuroshard/core/network/nat.py +201 -0
- neuroshard/core/network/nat_traversal.py +695 -0
- neuroshard/core/network/p2p.py +929 -0
- neuroshard/core/network/p2p_data.py +150 -0
- neuroshard/core/swarm/__init__.py +106 -0
- neuroshard/core/swarm/aggregation.py +729 -0
- neuroshard/core/swarm/buffers.py +643 -0
- neuroshard/core/swarm/checkpoint.py +709 -0
- neuroshard/core/swarm/compute.py +624 -0
- neuroshard/core/swarm/diloco.py +844 -0
- neuroshard/core/swarm/factory.py +1288 -0
- neuroshard/core/swarm/heartbeat.py +669 -0
- neuroshard/core/swarm/logger.py +487 -0
- neuroshard/core/swarm/router.py +658 -0
- neuroshard/core/swarm/service.py +640 -0
- neuroshard/core/training/__init__.py +29 -0
- neuroshard/core/training/checkpoint.py +600 -0
- neuroshard/core/training/distributed.py +1602 -0
- neuroshard/core/training/global_tracker.py +617 -0
- neuroshard/core/training/production.py +276 -0
- neuroshard/governance_cli.py +729 -0
- neuroshard/grpc_server.py +895 -0
- neuroshard/runner.py +3223 -0
- neuroshard/sdk/__init__.py +92 -0
- neuroshard/sdk/client.py +990 -0
- neuroshard/sdk/errors.py +101 -0
- neuroshard/sdk/types.py +282 -0
- neuroshard/tracker/__init__.py +0 -0
- neuroshard/tracker/server.py +864 -0
- neuroshard/ui/__init__.py +0 -0
- neuroshard/ui/app.py +102 -0
- neuroshard/ui/templates/index.html +1052 -0
- neuroshard/utils/__init__.py +0 -0
- neuroshard/utils/autostart.py +81 -0
- neuroshard/utils/hardware.py +121 -0
- neuroshard/utils/serialization.py +90 -0
- neuroshard/version.py +1 -0
- nexaroa-0.0.111.dist-info/METADATA +283 -0
- nexaroa-0.0.111.dist-info/RECORD +78 -0
- nexaroa-0.0.111.dist-info/WHEEL +5 -0
- nexaroa-0.0.111.dist-info/entry_points.txt +4 -0
- nexaroa-0.0.111.dist-info/licenses/LICENSE +190 -0
- nexaroa-0.0.111.dist-info/top_level.txt +2 -0
- protos/__init__.py +0 -0
- protos/neuroshard.proto +651 -0
- protos/neuroshard_pb2.py +160 -0
- protos/neuroshard_pb2_grpc.py +1298 -0
neuroshard/sdk/client.py
ADDED
@@ -0,0 +1,990 @@
"""
NeuroShard SDK Client

High-level Python client for interacting with NeuroShard nodes.

Usage:
    from neuroshard import NeuroNode, NEUROLedger

    node = NeuroNode("http://localhost:8000", api_token="YOUR_TOKEN")
    status = node.get_status()
    response = node.inference("Hello!", max_tokens=50)

    ledger = NEUROLedger(node)
    balance = ledger.get_balance()
"""

import os
import time
import json
import requests
from typing import Optional, List, Iterator, Dict, Any, Generator
from datetime import datetime, date, timedelta

from neuroshard.sdk.types import (
    NodeStatus,
    Metrics,
    InferenceResponse,
    InferenceChunk,
    PeerInfo,
    LayerInfo,
    Balance,
    Transaction,
    StakeInfo,
    StakeResult,
    UnstakeResult,
    RewardSummary,
    DailyReward,
    NodeConfig,
    TrainingStatus,
    ResourceStatus,
    InferenceMetrics,
    TrainingMetrics,
    NetworkMetrics,
    RewardMetrics,
    TokenUsage,
    Cost,
    Timing,
)

from neuroshard.sdk.errors import (
    NeuroShardError,
    AuthenticationError,
    InsufficientBalanceError,
    RateLimitError,
    NodeOfflineError,
    InvalidRequestError,
    NotFoundError,
    ForbiddenError,
    InternalError,
)


class NeuroNode:
    """
    High-level client for interacting with a NeuroShard node.

    Example:
        node = NeuroNode("http://localhost:8000", api_token="YOUR_TOKEN")

        # Check status
        status = node.get_status()
        print(f"Node: {status.node_id}")

        # Run inference
        response = node.inference("Explain quantum computing.", max_tokens=100)
        print(response.text)
    """

    def __init__(
        self,
        url: str,
        api_token: Optional[str] = None,
        timeout: float = 30.0,
        retry_attempts: int = 3,
        verify_ssl: bool = True,
    ):
        """
        Initialize NeuroNode client.

        Args:
            url: Node URL (e.g., "http://localhost:8000")
            api_token: API token for authentication
            timeout: Request timeout in seconds
            retry_attempts: Number of retry attempts for failed requests
            verify_ssl: Whether to verify SSL certificates
        """
        self.url = url.rstrip("/")
        self.api_token = api_token
        self.timeout = timeout
        self.retry_attempts = retry_attempts
        self.verify_ssl = verify_ssl

        self._session = requests.Session()
        if api_token:
            self._session.headers["Authorization"] = f"Bearer {api_token}"
        self._session.headers["Content-Type"] = "application/json"

    @classmethod
    def from_env(cls) -> "NeuroNode":
        """
        Create a NeuroNode from environment variables.

        Uses:
            NEUROSHARD_URL: Node URL
            NEUROSHARD_TOKEN: API token
            NEUROSHARD_TIMEOUT: Request timeout (optional)
        """
        url = os.environ.get("NEUROSHARD_URL", "http://localhost:8000")
        token = os.environ.get("NEUROSHARD_TOKEN")
        timeout = float(os.environ.get("NEUROSHARD_TIMEOUT", "30"))

        return cls(url=url, api_token=token, timeout=timeout)

    @classmethod
    def from_config(cls, config_path: Optional[str] = None) -> "NeuroNode":
        """
        Create a NeuroNode from a config file.

        Args:
            config_path: Path to config file. Defaults to ~/.neuroshard/config.json
        """
        if config_path is None:
            config_path = os.path.expanduser("~/.neuroshard/config.json")

        if not os.path.exists(config_path):
            raise FileNotFoundError(f"Config file not found: {config_path}")

        with open(config_path) as f:
            config = json.load(f)

        return cls(
            url=config.get("url", "http://localhost:8000"),
            api_token=config.get("token"),
            timeout=config.get("timeout", 30.0),
            retry_attempts=config.get("retry_attempts", 3),
        )
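
    # The config file read by from_config() above is plain JSON whose keys
    # mirror the constructor arguments; only "url", "token", "timeout", and
    # "retry_attempts" are consulted (editor's sketch):
    #
    #     # ~/.neuroshard/config.json
    #     {
    #         "url": "http://localhost:8000",
    #         "token": "YOUR_TOKEN",
    #         "timeout": 30.0,
    #         "retry_attempts": 3
    #     }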

    def _request(
        self,
        method: str,
        endpoint: str,
        data: Optional[dict] = None,
        params: Optional[dict] = None,
        stream: bool = False,
    ) -> Any:
        """Make an HTTP request to the node.

        Returns the parsed JSON body, or the raw streaming Response when
        stream=True (hence the Any return type).
        """
        url = f"{self.url}{endpoint}"

        last_error = None
        for attempt in range(self.retry_attempts):
            try:
                response = self._session.request(
                    method=method,
                    url=url,
                    json=data,
                    params=params,
                    timeout=self.timeout,
                    verify=self.verify_ssl,
                    stream=stream,
                )

                # Map HTTP status codes to typed SDK errors
                if response.status_code == 401:
                    raise AuthenticationError()
                elif response.status_code == 403:
                    raise ForbiddenError()
                elif response.status_code == 404:
                    raise NotFoundError("Resource", endpoint)
                elif response.status_code == 429:
                    retry_after = int(response.headers.get("Retry-After", 60))
                    raise RateLimitError(retry_after)
                elif response.status_code >= 500:
                    raise InternalError()
                elif response.status_code >= 400:
                    try:
                        error_data = response.json()
                        error_info = error_data.get("error", {})
                        code = error_info.get("code", "UNKNOWN")
                        message = error_info.get("message", response.text)

                        if code == "INSUFFICIENT_BALANCE":
                            details = error_info.get("details", {})
                            raise InsufficientBalanceError(
                                required=details.get("required", 0),
                                available=details.get("available", 0),
                            )
                        raise InvalidRequestError(message)
                    except (json.JSONDecodeError, KeyError):
                        raise InvalidRequestError(response.text)

                if stream:
                    return response

                return response.json()

            except requests.exceptions.ConnectionError:
                last_error = NodeOfflineError(self.url)
                if attempt < self.retry_attempts - 1:
                    time.sleep(0.5 * (attempt + 1))
                    continue
                raise last_error
            except requests.exceptions.Timeout:
                last_error = NeuroShardError("Request timeout", code="TIMEOUT")
                if attempt < self.retry_attempts - 1:
                    time.sleep(0.5 * (attempt + 1))
                    continue
                raise last_error
            except NeuroShardError:
                raise
            except Exception as e:
                raise NeuroShardError(str(e))

        if last_error:
            raise last_error
        raise NeuroShardError("Request failed after retries")

    def get_status(self) -> NodeStatus:
        """
        Get current node status.

        Returns:
            NodeStatus with node info, layers, peers, training status, etc.
        """
        data = self._request("GET", "/api/v1/status")

        return NodeStatus(
            node_id=data.get("node_id", ""),
            version=data.get("version", ""),
            uptime_seconds=data.get("uptime_seconds", 0),
            status=data.get("status", "unknown"),
            role=data.get("role", "unknown"),
            layers=data.get("layers", []),
            peer_count=data.get("peer_count", 0),
            has_embedding=data.get("has_embedding", False),
            has_lm_head=data.get("has_lm_head", False),
            training=TrainingStatus(
                enabled=data.get("training", {}).get("enabled", False),
                epoch=data.get("training", {}).get("epoch", 0),
                step=data.get("training", {}).get("step", 0),
                loss=data.get("training", {}).get("loss", 0.0),
            ),
            resources=ResourceStatus(
                gpu_memory_used=data.get("resources", {}).get("gpu_memory_used", 0),
                gpu_memory_total=data.get("resources", {}).get("gpu_memory_total", 0),
                cpu_percent=data.get("resources", {}).get("cpu_percent", 0.0),
                ram_used=data.get("resources", {}).get("ram_used", 0),
                ram_total=data.get("resources", {}).get("ram_total", 0),
            ),
        )

    def get_metrics(self) -> Metrics:
        """
        Get performance metrics.

        Returns:
            Metrics with inference, training, network, and reward stats.
        """
        data = self._request("GET", "/api/v1/metrics")

        return Metrics(
            timestamp=data.get("timestamp", ""),
            inference=InferenceMetrics(
                requests_total=data.get("inference", {}).get("requests_total", 0),
                requests_per_minute=data.get("inference", {}).get("requests_per_minute", 0.0),
                avg_latency_ms=data.get("inference", {}).get("avg_latency_ms", 0.0),
                p99_latency_ms=data.get("inference", {}).get("p99_latency_ms", 0.0),
                tokens_generated=data.get("inference", {}).get("tokens_generated", 0),
            ),
            training=TrainingMetrics(
                steps_total=data.get("training", {}).get("steps_total", 0),
                steps_per_hour=data.get("training", {}).get("steps_per_hour", 0.0),
                gradients_submitted=data.get("training", {}).get("gradients_submitted", 0),
                gradients_accepted=data.get("training", {}).get("gradients_accepted", 0),
            ),
            network=NetworkMetrics(
                bytes_sent=data.get("network", {}).get("bytes_sent", 0),
                bytes_received=data.get("network", {}).get("bytes_received", 0),
                active_connections=data.get("network", {}).get("active_connections", 0),
                rpc_calls=data.get("network", {}).get("rpc_calls", 0),
                peer_count=data.get("network", {}).get("peer_count", 0),
            ),
            rewards=RewardMetrics(
                earned_today=data.get("rewards", {}).get("earned_today", 0.0),
                earned_total=data.get("rewards", {}).get("earned_total", 0.0),
                pending=data.get("rewards", {}).get("pending", 0.0),
            ),
        )

    def inference(
        self,
        prompt: str,
        max_tokens: int = 100,
        temperature: float = 1.0,
        top_p: float = 1.0,
        top_k: int = 50,
        stop: Optional[List[str]] = None,
        stream: bool = False,
    ) -> InferenceResponse:
        """
        Run an inference request.

        Args:
            prompt: Input text
            max_tokens: Maximum tokens to generate
            temperature: Sampling temperature (0-2)
            top_p: Nucleus sampling threshold
            top_k: Top-k sampling
            stop: Stop sequences
            stream: Enable streaming (use inference_stream for streaming)

        Returns:
            InferenceResponse with generated text, usage, cost, timing.
        """
        if stream:
            # For streaming, collect all chunks
            chunks = list(self.inference_stream(
                prompt=prompt,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                stop=stop,
            ))
            text = "".join(c.token for c in chunks)
            return InferenceResponse(
                id="stream",
                text=text,
                tokens_generated=len(chunks),
                finish_reason="stop",
                usage=TokenUsage(
                    prompt_tokens=len(prompt.split()),
                    completion_tokens=len(chunks),
                    total_tokens=len(prompt.split()) + len(chunks),
                ),
                cost=Cost(amount=0.0, currency="NEURO"),
                timing=Timing(),
            )

        data = self._request("POST", "/api/v1/inference", data={
            "prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "top_k": top_k,
            "stop": stop or [],
            "stream": False,
        })

        return InferenceResponse(
            id=data.get("id", ""),
            text=data.get("text", ""),
            tokens_generated=data.get("tokens_generated", 0),
            finish_reason=data.get("finish_reason", "stop"),
            usage=TokenUsage(
                prompt_tokens=data.get("usage", {}).get("prompt_tokens", 0),
                completion_tokens=data.get("usage", {}).get("completion_tokens", 0),
                total_tokens=data.get("usage", {}).get("total_tokens", 0),
            ),
            cost=Cost(
                amount=data.get("cost", {}).get("amount", 0.0),
                currency=data.get("cost", {}).get("currency", "NEURO"),
            ),
            timing=Timing(
                queue_ms=data.get("timing", {}).get("queue_ms", 0.0),
                inference_ms=data.get("timing", {}).get("inference_ms", 0.0),
                total_ms=data.get("timing", {}).get("total_ms", 0.0),
            ),
        )

    def inference_stream(
        self,
        prompt: str,
        max_tokens: int = 100,
        temperature: float = 1.0,
        top_p: float = 1.0,
        top_k: int = 50,
        stop: Optional[List[str]] = None,
    ) -> Generator[InferenceChunk, None, None]:
        """
        Stream inference response token by token.

        Args:
            prompt: Input text
            max_tokens: Maximum tokens to generate
            temperature: Sampling temperature
            top_p: Nucleus sampling threshold
            top_k: Top-k sampling
            stop: Stop sequences

        Yields:
            InferenceChunk for each generated token.
        """
        response = self._request("POST", "/api/v1/inference", data={
            "prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "top_k": top_k,
            "stop": stop or [],
            "stream": True,
        }, stream=True)

        index = 0
        for line in response.iter_lines():
            if not line:
                continue

            line = line.decode("utf-8")
            if line.startswith("data: "):
                data_str = line[6:]
                if data_str == "[DONE]":
                    break

                try:
                    data = json.loads(data_str)
                    token = data.get("token", "")
                    if token == "[DONE]":
                        break

                    yield InferenceChunk(
                        token=token,
                        index=index,
                        logprob=data.get("logprob"),
                    )
                    index += 1
                except json.JSONDecodeError:
                    continue
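
    # The generator above yields InferenceChunk objects as SSE "data:" lines
    # arrive; consuming it is plain iteration (editor's sketch, assuming a
    # reachable node):
    #
    #     node = NeuroNode("http://localhost:8000", api_token="YOUR_TOKEN")
    #     for chunk in node.inference_stream("Tell me a story", max_tokens=50):
    #         print(chunk.token, end="", flush=True)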

    def get_peers(self) -> List[PeerInfo]:
        """
        List connected peers.

        Returns:
            List of PeerInfo with peer details.
        """
        data = self._request("GET", "/api/v1/peers")

        peers = []
        for p in data.get("peers", []):
            peers.append(PeerInfo(
                id=p.get("id", ""),
                address=p.get("address", ""),
                role=p.get("role", "worker"),
                layers=p.get("layers", []),
                latency_ms=p.get("latency_ms", 0.0),
                connected_since=p.get("connected_since"),
            ))
        return peers

    def get_layers(self) -> List[LayerInfo]:
        """
        List assigned layers.

        Returns:
            List of LayerInfo with layer details.
        """
        data = self._request("GET", "/api/v1/layers")

        layers = []
        for l in data.get("layers", []):
            layers.append(LayerInfo(
                index=l.get("index", 0),
                type=l.get("type", "transformer"),
                memory_mb=l.get("memory_mb", 0),
                status=l.get("status", "active"),
            ))
        return layers

    def get_config(self) -> NodeConfig:
        """
        Get node configuration.

        Returns:
            NodeConfig with current settings.
        """
        data = self._request("GET", "/api/v1/config")

        return NodeConfig(
            node_id=data.get("node_id", ""),
            port=data.get("port", 8000),
            grpc_port=data.get("grpc_port", 9000),
            tracker_url=data.get("tracker_url", ""),
            training=data.get("training", {}),
            resources=data.get("resources", {}),
        )

    def update_config(self, updates: Dict[str, Any]) -> bool:
        """
        Update node configuration.

        Args:
            updates: Dictionary of configuration updates.

        Returns:
            True if update was successful.
        """
        data = self._request("PATCH", "/api/v1/config", data=updates)
        return data.get("success", False)

    def health(self) -> Dict[str, Any]:
        """
        Check node health.

        Returns:
            Health status dictionary.
        """
        return self._request("GET", "/api/v1/health")


class NEUROLedger:
    """
    Client for NEURO token operations.

    Example:
        node = NeuroNode("http://localhost:8000", api_token="YOUR_TOKEN")
        ledger = NEUROLedger(node)

        balance = ledger.get_balance()
        print(f"Balance: {balance.available} NEURO")
    """

    def __init__(self, node: NeuroNode):
        """
        Initialize NEUROLedger.

        Args:
            node: NeuroNode instance to use for API calls.
        """
        self.node = node

    def _parse_timestamp(self, ts_str: str) -> datetime:
        """Parse ISO timestamp, handling Z suffix."""
        if ts_str.endswith("Z"):
            ts_str = ts_str[:-1] + "+00:00"
        return datetime.fromisoformat(ts_str)

    def get_balance(self) -> Balance:
        """
        Get wallet balance.

        Returns:
            Balance with available, staked, pending, and total amounts.
        """
        data = self.node._request("GET", "/api/v1/wallet/balance")

        balances = data.get("balances", data)
        return Balance(
            address=data.get("address", ""),
            available=balances.get("available", 0.0),
            staked=balances.get("staked", 0.0),
            pending=balances.get("pending", 0.0),
            total=balances.get("total", 0.0),
        )

    def send(
        self,
        to: str,
        amount: float,
        memo: Optional[str] = None,
    ) -> Transaction:
        """
        Send NEURO to another address.

        Args:
            to: Recipient address
            amount: Amount in NEURO
            memo: Optional transaction memo

        Returns:
            Transaction with details.
        """
        data = self.node._request("POST", "/api/v1/wallet/send", data={
            "to": to,
            "amount": amount,
            "memo": memo or "",
        })

        return Transaction(
            id=data.get("transaction_id", ""),
            from_address=data.get("from", ""),
            to_address=data.get("to", ""),
            amount=data.get("amount", 0.0),
            fee=data.get("fee", 0.0),
            status=data.get("status", "pending"),
            timestamp=self._parse_timestamp(data.get("timestamp", datetime.now().isoformat())),
            type="transfer",
            memo=data.get("memo"),
        )

    def get_transactions(
        self,
        limit: int = 10,
        offset: int = 0,
        type: Optional[str] = None,
    ) -> List[Transaction]:
        """
        Get transaction history.

        Args:
            limit: Maximum transactions to return
            offset: Offset for pagination
            type: Filter by type ("reward", "send", "receive", "stake")

        Returns:
            List of Transaction objects.
        """
        params = {"limit": limit, "offset": offset}
        if type:
            params["type"] = type

        data = self.node._request("GET", "/api/v1/wallet/transactions", params=params)

        transactions = []
        for t in data.get("transactions", []):
            transactions.append(Transaction(
                id=t.get("id", ""),
                from_address=t.get("from", ""),
                to_address=t.get("to", ""),
                amount=t.get("amount", 0.0),
                fee=t.get("fee", 0.0),
                status=t.get("status", "confirmed"),
                timestamp=self._parse_timestamp(t.get("timestamp", datetime.now().isoformat())),
                type=t.get("type", "transfer"),
                memo=t.get("memo"),
            ))
        return transactions

    def stake(self, amount: float, duration_days: int) -> StakeResult:
        """
        Stake NEURO tokens.

        Args:
            amount: Amount to stake
            duration_days: Lock duration in days

        Returns:
            StakeResult with stake details and multiplier.
        """
        data = self.node._request("POST", "/api/v1/wallet/stake", data={
            "amount": amount,
            "duration_days": duration_days,
        })

        stake = data.get("stake", {})
        return StakeResult(
            amount=stake.get("amount", amount),
            duration_days=stake.get("duration_days", duration_days),
            start_date=date.fromisoformat(stake.get("start_date", date.today().isoformat())),
            unlock_date=date.fromisoformat(stake.get("unlock_date", (date.today() + timedelta(days=duration_days)).isoformat())),
            multiplier=stake.get("multiplier", 1.0),
        )

    def unstake(self, amount: float) -> UnstakeResult:
        """
        Request unstaking.

        Args:
            amount: Amount to unstake

        Returns:
            UnstakeResult with cooldown info.
        """
        data = self.node._request("POST", "/api/v1/wallet/unstake", data={
            "amount": amount,
        })

        unstake = data.get("unstake", {})
        return UnstakeResult(
            amount=unstake.get("amount", amount),
            cooldown_days=unstake.get("cooldown_days", 7),
            available_date=date.fromisoformat(unstake.get("available_date", (date.today() + timedelta(days=7)).isoformat())),
        )

    def get_stake_info(self) -> StakeInfo:
        """
        Get current staking information.

        Returns:
            StakeInfo with stake amount, duration, and multiplier.
        """
        data = self.node._request("GET", "/api/v1/stake/info")

        return StakeInfo(
            amount=data.get("stake", 0.0),
            duration_days=data.get("duration_days", 0),
            start_date=date.fromisoformat(data["start_date"]) if data.get("start_date") else None,
            unlock_date=date.fromisoformat(data["unlock_date"]) if data.get("unlock_date") else None,
            multiplier=data.get("stake_multiplier", 1.0),
            pending_unstake=data.get("pending_unstake", 0.0),
        )

    def get_rewards(
        self,
        start_date: Optional[date] = None,
        end_date: Optional[date] = None,
    ) -> RewardSummary:
        """
        Get reward history.

        Args:
            start_date: Filter from date
            end_date: Filter to date

        Returns:
            RewardSummary with totals and daily breakdown.
        """
        params = {}
        if start_date:
            params["start_date"] = start_date.isoformat()
        if end_date:
            params["end_date"] = end_date.isoformat()

        data = self.node._request("GET", "/api/v1/wallet/rewards", params=params)

        by_day = []
        for d in data.get("by_day", []):
            by_day.append(DailyReward(
                date=date.fromisoformat(d.get("date", date.today().isoformat())),
                amount=d.get("amount", 0.0),
                proofs=d.get("proofs", 0),
            ))

        return RewardSummary(
            total=data.get("total", 0.0),
            by_day=by_day,
            by_type=data.get("by_type", {}),
        )
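

# A typical ledger round-trip with the class above (editor's sketch; the
# amount and duration are placeholders):
#
#     ledger = NEUROLedger(node)
#     if ledger.get_balance().available >= 100.0:
#         result = ledger.stake(amount=100.0, duration_days=30)
#         print(f"Staked until {result.unlock_date} at x{result.multiplier}")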


# ============================================================================
# ASYNC CLIENTS
# ============================================================================

class AsyncNeuroNode:
    """
    Async version of NeuroNode.

    Example:
        async with AsyncNeuroNode("http://localhost:8000", api_token="TOKEN") as node:
            status = await node.get_status()
            response = await node.inference("Hello!")
    """

    def __init__(
        self,
        url: str,
        api_token: Optional[str] = None,
        timeout: float = 30.0,
        retry_attempts: int = 3,
    ):
        self.url = url.rstrip("/")
        self.api_token = api_token
        self.timeout = timeout
        self.retry_attempts = retry_attempts
        self._session = None

    async def __aenter__(self):
        import aiohttp
        headers = {"Content-Type": "application/json"}
        if self.api_token:
            headers["Authorization"] = f"Bearer {self.api_token}"

        self._session = aiohttp.ClientSession(
            headers=headers,
            timeout=aiohttp.ClientTimeout(total=self.timeout),
        )
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self._session:
            await self._session.close()

    async def _request(
        self,
        method: str,
        endpoint: str,
        data: Optional[dict] = None,
        params: Optional[dict] = None,
    ) -> dict:
        """Make an async HTTP request."""
        import aiohttp

        if not self._session:
            raise RuntimeError("Session not initialized. Use 'async with' context manager.")

        url = f"{self.url}{endpoint}"

        last_error = None
        for attempt in range(self.retry_attempts):
            try:
                async with self._session.request(
                    method=method,
                    url=url,
                    json=data,
                    params=params,
                ) as response:
                    if response.status == 401:
                        raise AuthenticationError()
                    elif response.status == 403:
                        raise ForbiddenError()
                    elif response.status == 404:
                        raise NotFoundError("Resource", endpoint)
                    elif response.status == 429:
                        retry_after = int(response.headers.get("Retry-After", 60))
                        raise RateLimitError(retry_after)
                    elif response.status >= 500:
                        raise InternalError()
                    elif response.status >= 400:
                        text = await response.text()
                        raise InvalidRequestError(text)

                    return await response.json()

            except aiohttp.ClientError:
                last_error = NodeOfflineError(self.url)
                if attempt < self.retry_attempts - 1:
                    import asyncio
                    await asyncio.sleep(0.5 * (attempt + 1))
                    continue
                raise last_error

        if last_error:
            raise last_error
        raise NeuroShardError("Request failed after retries")

    async def get_status(self) -> NodeStatus:
        """Get current node status."""
        data = await self._request("GET", "/api/v1/status")

        return NodeStatus(
            node_id=data.get("node_id", ""),
            version=data.get("version", ""),
            uptime_seconds=data.get("uptime_seconds", 0),
            status=data.get("status", "unknown"),
            role=data.get("role", "unknown"),
            layers=data.get("layers", []),
            peer_count=data.get("peer_count", 0),
            has_embedding=data.get("has_embedding", False),
            has_lm_head=data.get("has_lm_head", False),
            training=TrainingStatus(
                enabled=data.get("training", {}).get("enabled", False),
                epoch=data.get("training", {}).get("epoch", 0),
                step=data.get("training", {}).get("step", 0),
                loss=data.get("training", {}).get("loss", 0.0),
            ),
            resources=ResourceStatus(
                gpu_memory_used=data.get("resources", {}).get("gpu_memory_used", 0),
                gpu_memory_total=data.get("resources", {}).get("gpu_memory_total", 0),
                cpu_percent=data.get("resources", {}).get("cpu_percent", 0.0),
                ram_used=data.get("resources", {}).get("ram_used", 0),
                ram_total=data.get("resources", {}).get("ram_total", 0),
            ),
        )

    async def inference(
        self,
        prompt: str,
        max_tokens: int = 100,
        temperature: float = 1.0,
        top_p: float = 1.0,
        top_k: int = 50,
        stop: Optional[List[str]] = None,
    ) -> InferenceResponse:
        """Run an inference request."""
        data = await self._request("POST", "/api/v1/inference", data={
            "prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "top_k": top_k,
            "stop": stop or [],
            "stream": False,
        })

        return InferenceResponse(
            id=data.get("id", ""),
            text=data.get("text", ""),
            tokens_generated=data.get("tokens_generated", 0),
            finish_reason=data.get("finish_reason", "stop"),
            usage=TokenUsage(
                prompt_tokens=data.get("usage", {}).get("prompt_tokens", 0),
                completion_tokens=data.get("usage", {}).get("completion_tokens", 0),
                total_tokens=data.get("usage", {}).get("total_tokens", 0),
            ),
            cost=Cost(
                amount=data.get("cost", {}).get("amount", 0.0),
                currency=data.get("cost", {}).get("currency", "NEURO"),
            ),
            timing=Timing(
                queue_ms=data.get("timing", {}).get("queue_ms", 0.0),
                inference_ms=data.get("timing", {}).get("inference_ms", 0.0),
                total_ms=data.get("timing", {}).get("total_ms", 0.0),
            ),
        )

    async def get_peers(self) -> List[PeerInfo]:
        """List connected peers."""
        data = await self._request("GET", "/api/v1/peers")

        peers = []
        for p in data.get("peers", []):
            peers.append(PeerInfo(
                id=p.get("id", ""),
                address=p.get("address", ""),
                role=p.get("role", "worker"),
                layers=p.get("layers", []),
                latency_ms=p.get("latency_ms", 0.0),
            ))
        return peers

    async def get_layers(self) -> List[LayerInfo]:
        """List assigned layers."""
        data = await self._request("GET", "/api/v1/layers")

        layers = []
        for l in data.get("layers", []):
            layers.append(LayerInfo(
                index=l.get("index", 0),
                type=l.get("type", "transformer"),
                memory_mb=l.get("memory_mb", 0),
                status=l.get("status", "active"),
            ))
        return layers


class AsyncNEUROLedger:
    """
    Async version of NEUROLedger.

    Example:
        async with AsyncNeuroNode("http://localhost:8000", api_token="TOKEN") as node:
            ledger = AsyncNEUROLedger(node)
            balance = await ledger.get_balance()
    """

    def __init__(self, node: AsyncNeuroNode):
        self.node = node

    def _parse_timestamp(self, ts_str: str) -> datetime:
        # Same Z-suffix handling as NEUROLedger; added here because send()
        # below calls self._parse_timestamp, which was otherwise undefined
        # on this class.
        if ts_str.endswith("Z"):
            ts_str = ts_str[:-1] + "+00:00"
        return datetime.fromisoformat(ts_str)

    async def get_balance(self) -> Balance:
        """Get wallet balance."""
        data = await self.node._request("GET", "/api/v1/wallet/balance")

        balances = data.get("balances", data)
        return Balance(
            address=data.get("address", ""),
            available=balances.get("available", 0.0),
            staked=balances.get("staked", 0.0),
            pending=balances.get("pending", 0.0),
            total=balances.get("total", 0.0),
        )

    async def send(self, to: str, amount: float, memo: Optional[str] = None) -> Transaction:
        """Send NEURO to another address."""
        data = await self.node._request("POST", "/api/v1/wallet/send", data={
            "to": to,
            "amount": amount,
            "memo": memo or "",
        })

        return Transaction(
            id=data.get("transaction_id", ""),
            from_address=data.get("from", ""),
            to_address=data.get("to", ""),
            amount=data.get("amount", 0.0),
            fee=data.get("fee", 0.0),
            status=data.get("status", "pending"),
            timestamp=self._parse_timestamp(data.get("timestamp", datetime.now().isoformat())),
            type="transfer",
            memo=data.get("memo"),
        )

    async def get_stake_info(self) -> StakeInfo:
        """Get current staking information."""
        data = await self.node._request("GET", "/api/v1/stake/info")

        return StakeInfo(
            amount=data.get("stake", 0.0),
            multiplier=data.get("stake_multiplier", 1.0),
        )