nexaroa 0.0.111__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. neuroshard/__init__.py +93 -0
  2. neuroshard/__main__.py +4 -0
  3. neuroshard/cli.py +466 -0
  4. neuroshard/core/__init__.py +92 -0
  5. neuroshard/core/consensus/verifier.py +252 -0
  6. neuroshard/core/crypto/__init__.py +20 -0
  7. neuroshard/core/crypto/ecdsa.py +392 -0
  8. neuroshard/core/economics/__init__.py +52 -0
  9. neuroshard/core/economics/constants.py +387 -0
  10. neuroshard/core/economics/ledger.py +2111 -0
  11. neuroshard/core/economics/market.py +975 -0
  12. neuroshard/core/economics/wallet.py +168 -0
  13. neuroshard/core/governance/__init__.py +74 -0
  14. neuroshard/core/governance/proposal.py +561 -0
  15. neuroshard/core/governance/registry.py +545 -0
  16. neuroshard/core/governance/versioning.py +332 -0
  17. neuroshard/core/governance/voting.py +453 -0
  18. neuroshard/core/model/__init__.py +30 -0
  19. neuroshard/core/model/dynamic.py +4186 -0
  20. neuroshard/core/model/llm.py +905 -0
  21. neuroshard/core/model/registry.py +164 -0
  22. neuroshard/core/model/scaler.py +387 -0
  23. neuroshard/core/model/tokenizer.py +568 -0
  24. neuroshard/core/network/__init__.py +56 -0
  25. neuroshard/core/network/connection_pool.py +72 -0
  26. neuroshard/core/network/dht.py +130 -0
  27. neuroshard/core/network/dht_plan.py +55 -0
  28. neuroshard/core/network/dht_proof_store.py +516 -0
  29. neuroshard/core/network/dht_protocol.py +261 -0
  30. neuroshard/core/network/dht_service.py +506 -0
  31. neuroshard/core/network/encrypted_channel.py +141 -0
  32. neuroshard/core/network/nat.py +201 -0
  33. neuroshard/core/network/nat_traversal.py +695 -0
  34. neuroshard/core/network/p2p.py +929 -0
  35. neuroshard/core/network/p2p_data.py +150 -0
  36. neuroshard/core/swarm/__init__.py +106 -0
  37. neuroshard/core/swarm/aggregation.py +729 -0
  38. neuroshard/core/swarm/buffers.py +643 -0
  39. neuroshard/core/swarm/checkpoint.py +709 -0
  40. neuroshard/core/swarm/compute.py +624 -0
  41. neuroshard/core/swarm/diloco.py +844 -0
  42. neuroshard/core/swarm/factory.py +1288 -0
  43. neuroshard/core/swarm/heartbeat.py +669 -0
  44. neuroshard/core/swarm/logger.py +487 -0
  45. neuroshard/core/swarm/router.py +658 -0
  46. neuroshard/core/swarm/service.py +640 -0
  47. neuroshard/core/training/__init__.py +29 -0
  48. neuroshard/core/training/checkpoint.py +600 -0
  49. neuroshard/core/training/distributed.py +1602 -0
  50. neuroshard/core/training/global_tracker.py +617 -0
  51. neuroshard/core/training/production.py +276 -0
  52. neuroshard/governance_cli.py +729 -0
  53. neuroshard/grpc_server.py +895 -0
  54. neuroshard/runner.py +3223 -0
  55. neuroshard/sdk/__init__.py +92 -0
  56. neuroshard/sdk/client.py +990 -0
  57. neuroshard/sdk/errors.py +101 -0
  58. neuroshard/sdk/types.py +282 -0
  59. neuroshard/tracker/__init__.py +0 -0
  60. neuroshard/tracker/server.py +864 -0
  61. neuroshard/ui/__init__.py +0 -0
  62. neuroshard/ui/app.py +102 -0
  63. neuroshard/ui/templates/index.html +1052 -0
  64. neuroshard/utils/__init__.py +0 -0
  65. neuroshard/utils/autostart.py +81 -0
  66. neuroshard/utils/hardware.py +121 -0
  67. neuroshard/utils/serialization.py +90 -0
  68. neuroshard/version.py +1 -0
  69. nexaroa-0.0.111.dist-info/METADATA +283 -0
  70. nexaroa-0.0.111.dist-info/RECORD +78 -0
  71. nexaroa-0.0.111.dist-info/WHEEL +5 -0
  72. nexaroa-0.0.111.dist-info/entry_points.txt +4 -0
  73. nexaroa-0.0.111.dist-info/licenses/LICENSE +190 -0
  74. nexaroa-0.0.111.dist-info/top_level.txt +2 -0
  75. protos/__init__.py +0 -0
  76. protos/neuroshard.proto +651 -0
  77. protos/neuroshard_pb2.py +160 -0
  78. protos/neuroshard_pb2_grpc.py +1298 -0
neuroshard/__init__.py ADDED
@@ -0,0 +1,93 @@
+ """
+ NeuroShard - Decentralized AI Training Network
+
+ Train LLMs together, earn NEURO tokens.
+
+ Quick Start:
+     # Install
+     pip install nexaroa
+
+     # Run a node
+     neuroshard --port 8000 --token YOUR_TOKEN
+
+     # Open dashboard
+     # http://localhost:8000/
+
+ SDK Usage:
+     from neuroshard import NeuroNode, NEUROLedger
+
+     # Connect to local node
+     node = NeuroNode("http://localhost:8000", api_token="YOUR_TOKEN")
+
+     # Check status
+     status = node.get_status()
+     print(f"Node: {status.node_id}")
+
+     # Run inference
+     response = node.inference("What is NeuroShard?", max_tokens=100)
+     print(response.text)
+
+     # Check balance
+     ledger = NEUROLedger(node)
+     balance = ledger.get_balance()
+     print(f"Balance: {balance.available} NEURO")
+
+ For more information:
+     https://neuroshard.com
+     https://docs.neuroshard.com
+ """
+
+ from neuroshard.version import __version__
+
+ # SDK Exports
+ from neuroshard.sdk import (
+     # Clients
+     NeuroNode,
+     NEUROLedger,
+     AsyncNeuroNode,
+     AsyncNEUROLedger,
+     # Types
+     NodeStatus,
+     Metrics,
+     InferenceResponse,
+     InferenceChunk,
+     PeerInfo,
+     LayerInfo,
+     Balance,
+     Transaction,
+     StakeInfo,
+     RewardSummary,
+     # Errors
+     NeuroShardError,
+     AuthenticationError,
+     InsufficientBalanceError,
+     RateLimitError,
+     NodeOfflineError,
+ )
+
+ __all__ = [
+     "__version__",
+     # Clients
+     "NeuroNode",
+     "NEUROLedger",
+     "AsyncNeuroNode",
+     "AsyncNEUROLedger",
+     # Types
+     "NodeStatus",
+     "Metrics",
+     "InferenceResponse",
+     "InferenceChunk",
+     "PeerInfo",
+     "LayerInfo",
+     "Balance",
+     "Transaction",
+     "StakeInfo",
+     "RewardSummary",
+     # Errors
+     "NeuroShardError",
+     "AuthenticationError",
+     "InsufficientBalanceError",
+     "RateLimitError",
+     "NodeOfflineError",
+ ]
+
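Note: the package also exports async clients (AsyncNeuroNode, AsyncNEUROLedger) that the module docstring does not demonstrate. A minimal sketch of how they would be used, assuming the async clients mirror the synchronous constructor and method names shown above (the awaited calls are an assumption, not confirmed by this diff):

    import asyncio
    from neuroshard import AsyncNeuroNode, AsyncNEUROLedger

    async def main():
        # Assumed: same constructor arguments as the sync NeuroNode client.
        node = AsyncNeuroNode("http://localhost:8000", api_token="YOUR_TOKEN")

        # Assumed: same method names as the sync client, awaited.
        status = await node.get_status()
        print(f"Node: {status.node_id}")

        ledger = AsyncNEUROLedger(node)
        balance = await ledger.get_balance()
        print(f"Balance: {balance.available} NEURO")

    asyncio.run(main())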
neuroshard/__main__.py ADDED
@@ -0,0 +1,4 @@
+
+ if __name__ == "__main__":
+     from neuroshard.cli import main
+     main()
neuroshard/cli.py ADDED
@@ -0,0 +1,466 @@
+ #!/usr/bin/env python3
+ """
+ NeuroShard CLI - Main entry point for running a node
+
+ Usage:
+     neuroshard-node --port 8000 --token YOUR_TOKEN
+     neuroshard-node --daemon --token YOUR_TOKEN
+     neuroshard-node --stop
+     neuroshard-node --help
+
+ This starts a NeuroShard node that:
+ 1. Participates in distributed LLM training
+ 2. Earns NEURO tokens via Proof of Neural Work
+ 3. Serves a web dashboard at http://localhost:PORT/
+ """
+
+ import argparse
+ import sys
+ import os
+ import webbrowser
+ import threading
+ import time
+ import signal
+
+ from neuroshard.version import __version__
+
+ # Paths for daemon mode
+ NEUROSHARD_DIR = os.path.expanduser("~/.neuroshard")
+ PID_FILE = os.path.join(NEUROSHARD_DIR, "node.pid")
+ LOG_FILE = os.path.join(NEUROSHARD_DIR, "node.log")
+
+
+ def open_dashboard_delayed(port: int, delay: float = 3.0):
+     """Open the dashboard in browser after a delay (to let server start)."""
+     def opener():
+         time.sleep(delay)
+         url = f"http://localhost:{port}/"
+         print(f"\n[NODE] Opening dashboard: {url}")
+         try:
+             webbrowser.open(url)
+         except Exception as e:
+             print(f"[NODE] Could not open browser: {e}")
+             print(f"[NODE] Please manually open: {url}")
+
+     thread = threading.Thread(target=opener, daemon=True)
+     thread.start()
+
+
+ def ensure_neuroshard_dir():
+     """Ensure ~/.neuroshard directory exists."""
+     os.makedirs(NEUROSHARD_DIR, exist_ok=True)
+
+
+ def get_running_pid():
+     """Get PID of running daemon, or None if not running."""
+     if not os.path.exists(PID_FILE):
+         return None
+     try:
+         with open(PID_FILE, 'r') as f:
+             pid = int(f.read().strip())
+         # Check if process is actually running
+         os.kill(pid, 0)
+         return pid
+     except (ValueError, ProcessLookupError, PermissionError):
+         # PID file exists but process is dead
+         try:
+             os.remove(PID_FILE)
+         except:
+             pass
+         return None
+
+
+ def stop_daemon():
+     """Stop the running daemon."""
+     pid = get_running_pid()
+     if pid is None:
+         print("[NODE] No running daemon found")
+         return False
+
+     print(f"[NODE] Stopping daemon (PID {pid})...")
+     try:
+         os.kill(pid, signal.SIGTERM)
+         # Wait for process to stop
+         for _ in range(30):  # 3 seconds max
+             time.sleep(0.1)
+             try:
+                 os.kill(pid, 0)
+             except ProcessLookupError:
+                 break
+         else:
+             # Force kill if still running
+             print("[NODE] Sending SIGKILL...")
+             os.kill(pid, signal.SIGKILL)
+
+         # Clean up PID file
+         if os.path.exists(PID_FILE):
+             os.remove(PID_FILE)
+         print("[NODE] ✓ Daemon stopped")
+         return True
+     except Exception as e:
+         print(f"[NODE] Error stopping daemon: {e}")
+         return False
+
+
+ def show_status():
+     """Show daemon status."""
+     pid = get_running_pid()
+     if pid:
+         print(f"[NODE] ✓ Daemon running (PID {pid})")
+         print(f"[NODE] Dashboard: http://localhost:8000/")
+         print(f"[NODE] Logs: {LOG_FILE}")
+         return True
+     else:
+         print("[NODE] ✗ Daemon not running")
+         return False
+
+
+ def tail_logs(lines: int = 50):
+     """Show recent log entries."""
+     if not os.path.exists(LOG_FILE):
+         print(f"[NODE] No log file found at {LOG_FILE}")
+         return
+
+     print(f"[NODE] Last {lines} lines of {LOG_FILE}:")
+     print("-" * 60)
+     try:
+         with open(LOG_FILE, 'r') as f:
+             all_lines = f.readlines()
+         for line in all_lines[-lines:]:
+             print(line, end='')
+     except Exception as e:
+         print(f"[NODE] Error reading logs: {e}")
+
+
+ def daemonize(port: int):
+     """Fork process to run as daemon."""
+     ensure_neuroshard_dir()
+
+     # Check if already running
+     existing_pid = get_running_pid()
+     if existing_pid:
+         print(f"[NODE] Daemon already running (PID {existing_pid})")
+         print(f"[NODE] Use 'neuroshard --stop' to stop it first")
+         sys.exit(1)
+
+     print(f"[NODE] Starting daemon...")
+     print(f"[NODE] Logs: {LOG_FILE}")
+     print(f"[NODE] PID file: {PID_FILE}")
+     print(f"[NODE] Dashboard: http://localhost:{port}/")
+
+     # First fork
+     try:
+         pid = os.fork()
+         if pid > 0:
+             # Parent exits
+             print(f"[NODE] ✓ Daemon started (PID {pid})")
+             sys.exit(0)
+     except OSError as e:
+         print(f"[NODE] Fork failed: {e}")
+         sys.exit(1)
+
+     # Decouple from parent
+     os.chdir("/")
+     os.setsid()
+     os.umask(0)
+
+     # Second fork
+     try:
+         pid = os.fork()
+         if pid > 0:
+             sys.exit(0)
+     except OSError as e:
+         sys.exit(1)
+
+     # Redirect standard file descriptors
+     sys.stdout.flush()
+     sys.stderr.flush()
+
+     # Open log file
+     log_fd = open(LOG_FILE, 'a')
+
+     # Redirect stdout/stderr to log file
+     os.dup2(log_fd.fileno(), sys.stdout.fileno())
+     os.dup2(log_fd.fileno(), sys.stderr.fileno())
+
+     # Close stdin
+     devnull = open('/dev/null', 'r')
+     os.dup2(devnull.fileno(), sys.stdin.fileno())
+
+     # Write PID file
+     with open(PID_FILE, 'w') as f:
+         f.write(str(os.getpid()))
+
+     # Register cleanup on exit
+     import atexit
+     def cleanup():
+         if os.path.exists(PID_FILE):
+             try:
+                 os.remove(PID_FILE)
+             except:
+                 pass
+     atexit.register(cleanup)
+
+     # Log startup
+     print(f"\n{'='*60}")
+     print(f"[DAEMON] NeuroShard daemon started at {time.strftime('%Y-%m-%d %H:%M:%S')}")
+     print(f"[DAEMON] PID: {os.getpid()}")
+     print(f"[DAEMON] Port: {port}")
+     print(f"{'='*60}\n")
+
+
+ def main():
+     """Main CLI entry point."""
+     parser = argparse.ArgumentParser(
+         description="NeuroShard Node - Decentralized AI Training",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+   # Start a node (foreground)
+   neuroshard --token YOUR_TOKEN
+
+   # Start as background daemon
+   neuroshard --daemon --token YOUR_TOKEN
+
+   # Stop the daemon
+   neuroshard --stop
+
+   # Check daemon status
+   neuroshard --status
+
+   # View logs
+   neuroshard --logs
+
+   # Run on custom port
+   neuroshard --port 9000 --token YOUR_TOKEN
+
+   # Inference-only mode
+   neuroshard --token YOUR_TOKEN --no-training
+
+ Get your wallet token at: https://neuroshard.com/wallet
+ """
+     )
+
+     # Core options
+     parser.add_argument(
+         "--port", type=int, default=8000,
+         help="HTTP port for the node (default: 8000)"
+     )
+     parser.add_argument(
+         "--token", type=str, default=None,
+         help="Wallet token (64-char hex) or 12-word mnemonic phrase"
+     )
+     parser.add_argument(
+         "--tracker", type=str, default="https://neuroshard.com/api/tracker",
+         help="Tracker URL for peer discovery"
+     )
+
+     # Network options
+     parser.add_argument(
+         "--announce-ip", type=str, default=None,
+         help="Force this IP address for peer announcements"
+     )
+     parser.add_argument(
+         "--announce-port", type=int, default=None,
+         help="Force this port for peer announcements"
+     )
+
+     # Training options
+     parser.add_argument(
+         "--no-training", action="store_true",
+         help="Disable training (inference only)"
+     )
+     parser.add_argument(
+         "--diloco-steps", type=int, default=500,
+         help="DiLoCo inner steps before gradient sync (default: 500)"
+     )
+
+     # Device options
+     parser.add_argument(
+         "--device", type=str, default="auto",
+         choices=["auto", "cuda", "mps", "cpu"],
+         help="Compute device: auto (default), cuda, mps, or cpu"
+     )
+
+     # Resource limits
+     parser.add_argument(
+         "--memory", type=int, default=None,
+         help="Max memory in MB (default: auto-detect 70%% of system RAM)"
+     )
+     parser.add_argument(
+         "--cpu-threads", type=int, default=None,
+         help="Max CPU threads to use (default: all cores)"
+     )
+     parser.add_argument(
+         "--max-storage", type=int, default=100,
+         help="Max disk space for training data in MB (default: 100)"
+     )
+
+     # UI options
+     parser.add_argument(
+         "--no-browser", action="store_true",
+         help="Don't auto-open dashboard in browser"
+     )
+     parser.add_argument(
+         "--headless", action="store_true",
+         help="Don't auto-open browser (same as --no-browser)"
+     )
+
+     # Daemon options
+     parser.add_argument(
+         "--daemon", "-d", action="store_true",
+         help="Run as background daemon (logs to ~/.neuroshard/node.log)"
+     )
+     parser.add_argument(
+         "--stop", action="store_true",
+         help="Stop the running daemon"
+     )
+     parser.add_argument(
+         "--status", action="store_true",
+         help="Check if daemon is running"
+     )
+     parser.add_argument(
+         "--logs", action="store_true",
+         help="Show recent daemon logs"
+     )
+     parser.add_argument(
+         "--log-lines", type=int, default=50,
+         help="Number of log lines to show with --logs (default: 50)"
+     )
+
+     # Info options
+     parser.add_argument(
+         "--version", action="version",
+         version=f"NeuroShard {__version__}"
+     )
+
+     args = parser.parse_args()
+
+     # Handle daemon control commands first (before requiring token)
+     if args.stop:
+         success = stop_daemon()
+         sys.exit(0 if success else 1)
+
+     if args.status:
+         success = show_status()
+         sys.exit(0 if success else 1)
+
+     if args.logs:
+         tail_logs(args.log_lines)
+         sys.exit(0)
+
+     # Detect GPU before printing banner
+     gpu_status = "CPU"
+     gpu_color = ""
+     try:
+         import torch
+         if torch.cuda.is_available():
+             gpu_name = torch.cuda.get_device_name(0)
+             gpu_status = f"CUDA ({gpu_name})"
+             gpu_color = "\033[92m"  # Green
+         elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+             gpu_status = "Apple Metal (MPS)"
+             gpu_color = "\033[92m"  # Green
+         else:
+             gpu_status = "CPU (no GPU detected)"
+             gpu_color = "\033[93m"  # Yellow
+     except ImportError:
+         gpu_status = "PyTorch not installed"
+         gpu_color = "\033[91m"  # Red
+
+     reset_color = "\033[0m"
+
+     # Print banner
+     print(f"""
+ ╔══════════════════════════════════════════════════════════════╗
+ ║ ║
+ ║ ███╗ ██╗███████╗██╗ ██╗██████╗ ██████╗ ║
+ ║ ████╗ ██║██╔════╝██║ ██║██╔══██╗██╔═══██╗ ║
+ ║ ██╔██╗ ██║█████╗ ██║ ██║██████╔╝██║ ██║ ║
+ ║ ██║╚██╗██║██╔══╝ ██║ ██║██╔══██╗██║ ██║ ║
+ ║ ██║ ╚████║███████╗╚██████╔╝██║ ██║╚██████╔╝ ║
+ ║ ╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ║
+ ║ ║
+ ║ ███████╗██╗ ██╗ █████╗ ██████╗ ██████╗ ║
+ ║ ██╔════╝██║ ██║██╔══██╗██╔══██╗██╔══██╗ ║
+ ║ ███████╗███████║███████║██████╔╝██║ ██║ ║
+ ║ ╚════██║██╔══██║██╔══██║██╔══██╗██║ ██║ ║
+ ║ ███████║██║ ██║██║ ██║██║ ██║██████╔╝ ║
+ ║ ╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═════╝ ║
+ ║ ║
+ ║ Decentralized AI Training Network ║
+ ║ v{__version__:<10} ║
+ ╚══════════════════════════════════════════════════════════════╝
+     """)
+
+     # Print GPU status
+     print(f" {gpu_color}🖥️ Device: {gpu_status}{reset_color}")
+     print()
+
+     # Validate token
+     if not args.token:
+         print("[ERROR] Wallet token required!")
+         print()
+         print("Get your token at: https://neuroshard.com/wallet")
+         print()
+         print("Usage: neuroshard --token YOUR_TOKEN")
+         print(" neuroshard --daemon --token YOUR_TOKEN")
+         sys.exit(1)
+
+     # Daemonize if requested (must happen after banner so user sees feedback)
+     if args.daemon:
+         # Check platform - daemon mode only works on Unix
+         if sys.platform == 'win32':
+             print("[ERROR] Daemon mode not supported on Windows")
+             print("[NODE] Use Windows Task Scheduler or run in foreground")
+             sys.exit(1)
+         daemonize(args.port)
+
+     # Auto-open browser (unless disabled or daemon mode)
+     if not args.no_browser and not args.headless and not args.daemon:
+         open_dashboard_delayed(args.port)
+
+     # Import runner from the package
+     from neuroshard.runner import run_node
+
+     # Handle mnemonic input
+     node_token = args.token
+     if node_token:
+         words = node_token.strip().split()
+         if len(words) == 12:
+             try:
+                 from mnemonic import Mnemonic
+                 mnemo = Mnemonic("english")
+                 if mnemo.check(node_token):
+                     seed = mnemo.to_seed(node_token, passphrase="")
+                     node_token = seed[:32].hex()
+                     print("[NODE] ✅ Wallet recovered from mnemonic")
+                 else:
+                     print("[WARNING] Invalid mnemonic - treating as raw token")
+             except ImportError:
+                 print("[WARNING] 'mnemonic' package not installed")
+             except Exception as e:
+                 print(f"[WARNING] Mnemonic error: {e}")
+
+     # Run the node
+     print(f"[NODE] Starting on port {args.port}...")
+     print(f"[NODE] Dashboard: http://localhost:{args.port}/")
+     print()
+
+     run_node(
+         port=args.port,
+         tracker=args.tracker,
+         node_token=node_token,
+         announce_ip=args.announce_ip,
+         announce_port=args.announce_port,
+         enable_training=not args.no_training,
+         available_memory_mb=args.memory,
+         max_storage_mb=args.max_storage,
+         max_cpu_threads=args.cpu_threads,
+         diloco_inner_steps=args.diloco_steps,
+         device=args.device,
+     )
+
+
+ if __name__ == "__main__":
+     main()
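Note: the mnemonic handling in main() derives the 64-char hex wallet token from a 12-word BIP-39 phrase via the third-party `mnemonic` package, which the CLI imports lazily. A minimal standalone sketch of the same derivation, assuming that package is installed; the phrase shown is the standard BIP-39 test vector, not a real wallet:

    from mnemonic import Mnemonic

    phrase = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about"

    mnemo = Mnemonic("english")
    if mnemo.check(phrase):
        # Same derivation as the CLI: BIP-39 seed, first 32 bytes, hex-encoded.
        seed = mnemo.to_seed(phrase, passphrase="")
        node_token = seed[:32].hex()
        print(node_token)  # 64-char hex wallet token passed to run_node()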
neuroshard/core/__init__.py ADDED
@@ -0,0 +1,92 @@
+ # neuroshard/core/__init__.py
+ """
+ NeuroShard Core Module
+
+ Organized into subpackages:
+ - swarm: THE architecture (SwarmEnabledDynamicNode, SwarmRouter, etc.)
+ - model: Model components (DynamicNeuroNode, DynamicNeuroLLM)
+ - network: P2P, DHT, NAT traversal
+ - training: Training coordination, gradient compression
+ - economics: Token economics, ledger, wallet
+ - crypto: ECDSA cryptography
+
+ Primary entry point:
+     from neuroshard.core.swarm import create_swarm_node, SwarmNodeConfig
+ """
+
+ __all__ = [
+     # Swarm (Primary) - See neuroshard.core.swarm for full list
+     'SwarmEnabledDynamicNode',
+     'SwarmNodeConfig',
+     'SwarmComponents',
+     'create_swarm_node',
+     'SwarmRouter',
+     'SwarmHeartbeatService',
+     'ActivationBuffer',
+     'OutboundBuffer',
+     'ComputeEngine',
+     'DiLoCoTrainer',
+     'RobustAggregator',
+     'SpeculativeCheckpointer',
+     'SwarmServiceMixin',
+     'SwarmLogger',
+     # Model
+     'DynamicNeuroNode',
+     'DynamicLayerPool',
+     'DynamicNeuroLLM',
+     'create_dynamic_node',
+     # Network
+     'P2PManager',
+     # Training
+     'TrainingCoordinator',
+     'GradientCompressor',
+     'GlobalTrainingTracker',
+     # Economics
+     'NEUROLedger',
+     # Crypto
+     'sign_message',
+     'verify_signature',
+ ]
+
+
+ def __getattr__(name):
+     """Lazy loading to avoid circular dependencies."""
+     # Swarm components
+     if name in ('SwarmEnabledDynamicNode', 'SwarmNodeConfig', 'SwarmComponents', 'create_swarm_node',
+                 'SwarmRouter', 'PeerCandidate', 'SwarmHeartbeatService', 'CapacityBitmask',
+                 'ActivationBuffer', 'OutboundBuffer', 'ActivationPacket', 'ActivationPriority',
+                 'ComputeEngine', 'StepOutcome',
+                 'DiLoCoTrainer', 'DiLoCoConfig', 'OuterOptimizer',
+                 'RobustAggregator', 'GradientValidator', 'AggregationStrategy', 'AggregationConfig', 'ValidationConfig',
+                 'SpeculativeCheckpointer', 'CheckpointConfig',
+                 'SwarmServiceMixin', 'SwarmNodeState',
+                 'SwarmLogger', 'LogCategory', 'NodeRole', 'get_swarm_logger', 'init_swarm_logger'):
+         from neuroshard.core import swarm
+         return getattr(swarm, name)
+     # Model components
+     elif name in ('DynamicNeuroNode', 'DynamicLayerPool', 'DynamicNeuroLLM', 'create_dynamic_node'):
+         from neuroshard.core import model
+         return getattr(model, name)
+     # Network components
+     elif name in ('P2PManager', 'P2PDataManager', 'DHT', 'DHTProtocol', 'DHTService',
+                   'NATTraverser', 'NATTraversalManager', 'STUNClient', 'NATType',
+                   'ConnectionPool', 'get_channel', 'TensorSerializer'):
+         from neuroshard.core import network
+         return getattr(network, name)
+     # Training components
+     elif name in ('TrainingCoordinator', 'FederatedDataManager', 'GenesisDataLoader',
+                   'GradientCompressor', 'CompressionConfig', 'CompressionMethod',
+                   'GlobalTrainingTracker', 'TrainingSnapshot', 'GlobalTrainingStats'):
+         from neuroshard.core import training
+         return getattr(training, name)
+     # Economics components
+     elif name in ('NEURO_DECIMALS', 'NEURO_TOTAL_SUPPLY', 'VALIDATOR_MIN_STAKE',
+                   'VALIDATOR_MIN_MEMORY_MB', 'NEUROLedger', 'PoNWProof', 'ProofType',
+                   'Wallet', 'InferenceMarket'):
+         from neuroshard.core import economics
+         return getattr(economics, name)
+     # Crypto components
+     elif name in ('sign_message', 'verify_signature', 'generate_keypair', 'is_valid_node_id_format'):
+         from neuroshard.core import crypto
+         return getattr(crypto, name)
+     raise AttributeError(f"module 'neuroshard.core' has no attribute '{name}'")
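Note: neuroshard/core/__init__.py resolves its exports through a module-level __getattr__ (PEP 562), so subpackages like swarm, model, and network are imported only when one of their names is first accessed, which is how the package avoids circular imports and keeps `import neuroshard.core` cheap. A minimal sketch of the pattern in isolation, using a hypothetical package `pkg` and attribute `HeavyThing` (not part of this diff):

    # pkg/__init__.py -- hypothetical package illustrating the same lazy-export pattern
    __all__ = ["HeavyThing"]

    def __getattr__(name):
        """Resolve exported names on first access instead of at import time."""
        if name == "HeavyThing":
            from pkg import heavy  # submodule imported only when HeavyThing is requested
            return heavy.HeavyThing
        raise AttributeError(f"module 'pkg' has no attribute '{name}'")

With this in place, `from pkg import HeavyThing` triggers __getattr__ once and caches nothing extra itself; callers that never touch HeavyThing never pay for importing pkg.heavy or its dependencies.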